Example #1
0
    def _create_controls(self):
        """ Create the default controls

        We create a Panel for each group of controls that we need to be able
        to show and hide separately.

        ** AR background and Spectrum efficiency compensation **

        These two controls are linked using VAs in the tab_data model.

        The controls are also linked to the VAs using event handlers, so that
        they can pass on the values changed by the user.
        """

        # Panel containing information about the acquisition file
        self._pnl_acqfile = FileInfoSettingsController(
            self.tab_panel.fp_fileinfo, "No file loaded")
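        # Build the wildcard string once; it is reused by all the file-selection buttons below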
        wildcards, _ = formats_to_wildcards(
            odemis.dataio.get_available_formats(), include_all=True)
        # Panel with AR background file information
        # It's displayed only if there are AR streams (handled by the tab controller)
        self._pnl_arfile = FileInfoSettingsController(
            self.tab_panel.fp_fileinfo, "")
        self._arfile_ctrl = self._pnl_arfile.add_file_button(
            "AR background",
            tooltip="Angle-resolved background acquisition file",
            clearlabel="None",
            wildcard=wildcards).value_ctrl
        self._pnl_arfile.hide_panel()
        self._arfile_ctrl.Bind(EVT_FILE_SELECT, self._on_ar_file_select)
        self.tab_data.ar_cal.subscribe(self._on_ar_cal, init=True)

        # Panel with spectrum background + efficiency compensation file information
        # They are displayed only if there are Spectrum streams
        self._pnl_specfile = FileInfoSettingsController(
            self.tab_panel.fp_fileinfo, "")
        self._spec_bckfile_ctrl = self._pnl_specfile.add_file_button(
            "Spec. background",
            tooltip="Spectrum background correction file",
            clearlabel="None",
            wildcard=wildcards).value_ctrl
        self._spec_bckfile_ctrl.Bind(EVT_FILE_SELECT,
                                     self._on_spec_bck_file_select)
        self.tab_data.spec_bck_cal.subscribe(self._on_spec_bck_cal, init=True)

        self._specfile_ctrl = self._pnl_specfile.add_file_button(
            "Spec. correction",
            tooltip="Spectrum efficiency correction file",
            clearlabel="None",
            wildcard=wildcards).value_ctrl
        self._pnl_specfile.hide_panel()
        self._specfile_ctrl.Bind(EVT_FILE_SELECT, self._on_spec_file_select)
        self.tab_data.spec_cal.subscribe(self._on_spec_cal, init=True)

        self.tab_panel.fp_fileinfo.expand()
Example #2
0
    def _get_snapshot_info(self, dialog=False):
        config = conf.get_acqui_conf()

        tab, filepath, exporter = self._main_data_model.tab.value, None, None
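        # filepath and exporter stay None if the user cancels the dialog; all three are
        # cleared if a non-interactive snapshot would overwrite an existing file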

        if dialog:
            format_info = get_available_formats()
            wildcards, formats = formats_to_wildcards(format_info)
            # The default file name should be empty because otherwise the
            # dialog will add an extension that won't change when the user
            # selects a different file type in the dialog.
            dlg = wx.FileDialog(self._main_frame,
                                "Save Snapshot",
                                config.last_path,
                                "",
                                wildcard=wildcards,
                                style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)

            # Select the last format used
            try:
                idx = formats.index(config.last_format)
            except ValueError:
                idx = 0
            dlg.SetFilterIndex(idx)

            if dlg.ShowModal() == wx.ID_OK:
                path = dlg.GetPath()
                fmt = formats[dlg.GetFilterIndex()]
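                # The first extension listed for the selected format is used by default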
                extension = format_info[fmt][0]

                # Prevent double extensions when an old file is selected
                filepath, _ = os.path.splitext(path)
                filepath = filepath + extension

                config.last_path = os.path.dirname(path)
                config.last_format = fmt
                config.last_extension = extension
                config.write()
                exporter = dataio.get_exporter(config.last_format)

            dlg.Destroy()
        else:
            extension = config.last_extension
            dirname = get_picture_folder()
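            # Name the snapshot after the current date and time (eg, "20210503-161205")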
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())
            filepath = os.path.join(dirname, basename + extension)
            exporter = dataio.get_exporter(config.last_format)

            if os.path.exists(filepath):
                msg = "File '%s' already exists, cancelling snapshot"
                logging.warning(msg, filepath)
                tab, filepath, exporter = None, None, None

        return tab, filepath, exporter
Example #3
0
    def open_image(self, dlg):
        tab = self.main_app.main_data.getTabByName("analysis")
        tab_data = tab.tab_data_model
        fi = tab_data.acq_fileinfo.value

        if fi and fi.file_name:
            path, _ = os.path.split(fi.file_name)
        else:
            config = get_acqui_conf()
            path = config.last_path

        # Find the available formats (and corresponding extensions)
        formats_to_ext = dataio.get_available_formats(os.O_RDONLY)
        wildcards, formats = guiutil.formats_to_wildcards(formats_to_ext,
                                                          include_all=True)
        dialog = wx.FileDialog(dlg,
                               message="Choose a file to load",
                               defaultDir=path,
                               defaultFile="",
                               style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
                               wildcard=wildcards)

        # Show the dialog and check whether it was accepted or cancelled
        if dialog.ShowModal() != wx.ID_OK:
            return None

        # Detect the format to use
        filename = dialog.GetPath()

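        # Only the first data array of the acquisition is used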
        data = udataio.open_acquisition(filename)[0]
        try:
            data = self._ensureGrayscale(data)
        except ValueError as ex:
            box = wx.MessageDialog(dlg, str(ex), "Failed to open image",
                                   wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return None

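        # Limit the crop controls to at most half of the image in each dimension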
        self.crop_top.range = (0, data.shape[0] // 2)
        self.crop_bottom.range = (0, data.shape[0] // 2)
        self.crop_left.range = (0, data.shape[1] // 2)
        self.crop_right.range = (0, data.shape[1] // 2)

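        # Override the metadata so the image is displayed centred, with a (fake) 1 nm pixel size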
        data.metadata[model.MD_POS] = (0, 0)
        data.metadata[model.MD_PIXEL_SIZE] = (1e-9, 1e-9)

        basename = os.path.splitext(os.path.split(filename)[1])[0]
        return stream.StaticSEMStream(basename, data)
Example #4
0
    def _create_controls(self):
        """ Create the default controls

        We create a Panel for each group of controls that we need to be able
        to show and hide separately.

        ** AR background and Spectrum efficiency compensation **

        These two controls are linked using VAs in the tab_data model.

        The controls are also linked to the VAs using event handlers, so that
        they can pass on the values changed by the user.
        """

        # Panel containing information about the acquisition file
        self._pnl_acqfile = FileInfoSettingsController(self.tab_panel.fp_fileinfo, "No file loaded")
        wildcards, _ = formats_to_wildcards(odemis.dataio.get_available_formats(),
                                            include_all=True)
        # Panel with AR background file information
        # It's displayed only if there are AR streams (handled by the tab controller)
        self._pnl_arfile = FileInfoSettingsController(self.tab_panel.fp_fileinfo, "")
        self._arfile_ctrl = self._pnl_arfile.add_file_button(
            "AR background",
            tooltip="Angle-resolved background acquisition file",
            clearlabel="None", wildcard=wildcards).value_ctrl
        self._pnl_arfile.hide_panel()
        self._arfile_ctrl.Bind(EVT_FILE_SELECT, self._on_ar_file_select)
        self.tab_data.ar_cal.subscribe(self._on_ar_cal, init=True)

        # Panel with spectrum background + efficiency compensation file information
        # They are displayed only if there are Spectrum streams
        self._pnl_specfile = FileInfoSettingsController(self.tab_panel.fp_fileinfo, "")
        self._spec_bckfile_ctrl = self._pnl_specfile.add_file_button(
            "Spec. background",
            tooltip="Spectrum background correction file",
            clearlabel="None", wildcard=wildcards).value_ctrl
        self._spec_bckfile_ctrl.Bind(EVT_FILE_SELECT, self._on_spec_bck_file_select)
        self.tab_data.spec_bck_cal.subscribe(self._on_spec_bck_cal, init=True)

        self._specfile_ctrl = self._pnl_specfile.add_file_button(
            "Spec. correction",
            tooltip="Spectrum efficiency correction file",
            clearlabel="None", wildcard=wildcards).value_ctrl
        self._pnl_specfile.hide_panel()
        self._specfile_ctrl.Bind(EVT_FILE_SELECT, self._on_spec_file_select)
        self.tab_data.spec_cal.subscribe(self._on_spec_cal, init=True)

        self.tab_panel.fp_fileinfo.expand()
Example #5
0
    def open_image(self, dlg):
        tab = self.main_app.main_data.getTabByName("analysis")
        tab_data = tab.tab_data_model
        fi = tab_data.acq_fileinfo.value

        if fi and fi.file_name:
            path, _ = os.path.split(fi.file_name)
        else:
            config = get_acqui_conf()
            path = config.last_path

        # Find the available formats (and corresponding extensions)
        formats_to_ext = dataio.get_available_formats(os.O_RDONLY)
        wildcards, formats = guiutil.formats_to_wildcards(formats_to_ext, include_all=True)
        dialog = wx.FileDialog(dlg,
                               message="Choose a file to load",
                               defaultDir=path,
                               defaultFile="",
                               style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
                               wildcard=wildcards)

        # Show the dialog and check whether it was accepted or cancelled
        if dialog.ShowModal() != wx.ID_OK:
            return None

        # Detect the format to use
        filename = dialog.GetPath()

        data = udataio.open_acquisition(filename)[0]
        try:
            data = self._ensureGrayscale(data)
        except ValueError as ex:
            box = wx.MessageDialog(dlg, str(ex), "Failed to open image",
                                   wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return None

        self.crop_top.range = (0, data.shape[0] // 2)
        self.crop_bottom.range = (0, data.shape[0] // 2)
        self.crop_left.range = (0, data.shape[1] // 2)
        self.crop_right.range = (0, data.shape[1] // 2)

        data.metadata[model.MD_POS] = (0, 0)
        data.metadata[model.MD_PIXEL_SIZE] = (1e-9, 1e-9)

        basename = os.path.splitext(os.path.split(filename)[1])[0]
        return stream.StaticSEMStream(basename, data)
Example #6
0
    def ShowExportFileDialog(self, filename, default_exporter):
        """
        filename (string): full filename to propose by default
        default_exporter (module): default exporter to be used
        return (string or None): the new filename (or None if the user cancelled)
                (string): the format name
                (string): spatial, AR or spectrum
        """
        # Find the available formats (and corresponding extensions) according
        # to the export type
        export_type = self.get_export_type(self._data_model.focussedView.value)
        formats_to_ext = self.get_export_formats(export_type)

        # current filename
        path, base = os.path.split(filename)
        wildcards, formats = formats_to_wildcards(formats_to_ext, suffix="")
        dialog = wx.FileDialog(self._main_frame,
                               message="Choose a filename and destination",
                               defaultDir=path,
                               defaultFile="",
                               style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
                               wildcard=wildcards)

        # Default to the format of the default exporter (or the first format if not found)
        default_fmt = default_exporter.FORMAT
        try:
            idx = formats.index(default_fmt)
        except ValueError:
            idx = 0
        dialog.SetFilterIndex(idx)

        # Strip the extension, so that if the user changes the file format,
        # it will not have 2 extensions in a row.
        if base.endswith(default_exporter.EXTENSIONS[0]):
            base = base[:-len(default_exporter.EXTENSIONS[0])]
        dialog.SetFilename(base)

        # Show the dialog and check whether it was accepted or cancelled
        if dialog.ShowModal() != wx.ID_OK:
            return None, default_fmt, export_type

        # New location and name have been selected...
        # Store the path
        path = dialog.GetDirectory()

        # Store the format
        fmt = formats[dialog.GetFilterIndex()]

        # Check the filename has a good extension, or add the default one
        fn = dialog.GetFilename()
        ext = None
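        # Keep the longest matching extension, so that multi-part extensions
        # (eg, ".ome.tiff") are preferred over their shorter variants (eg, ".tiff")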
        for extension in formats_to_ext[fmt]:
            if fn.endswith(extension) and len(extension) > len(ext or ""):
                ext = extension

        if ext is None:
            if fmt == default_fmt and default_exporter.EXTENSIONS[0] in formats_to_ext[fmt]:
                # if the format is the same (and extension is compatible): keep
                # the extension. This avoids changing the extension if it's not
                # the default one.
                ext = default_exporter.EXTENSIONS[0]
            else:
                ext = formats_to_ext[fmt][0]  # default extension
            fn += ext

        return os.path.join(path, fn), fmt, export_type
Example #7
0
class SRAcqPlugin(Plugin):
    name = "Super-resolution acquisition"
    __version__ = "1.1"
    __author__ = u"Éric Piel"
    __license__ = "Public domain"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("number", {
            "label": "Number of frames",
            "tooltip": "Number of frames acquired",
            "control_type": gui.CONTROL_INT,  # no slider
            "accuracy": None,
        }),
        ("countConvertWavelength", {
            "label": "Emission wavelength",
            "tooltip": "Light wavelength received by the camera for count conversion.",
            "control_type": gui.CONTROL_FLT,
        }),
        ("exposureTime", {
            "control_type": gui.CONTROL_SLIDER,
            "scale": "log",
            "range": (0.001, 10.0),
            "type": "float",
            "accuracy": 2,
        }),
        ("binning", {
            "control_type": gui.CONTROL_RADIO,
            "tooltip": "Number of pixels combined",
#             "choices": conf.util.binning_1d_from_2d,
        }),
        ("resolution", {
            "control_type": gui.CONTROL_COMBO,
            "tooltip": "Number of pixels in the image",
            "accuracy": None,  # never simplify the numbers
#             "choices": conf.util.resolution_from_range,
        }),
        ("gain", {}),
        ("emGain", {
            "label": "EMCCD gain",
            "tooltip": "None means automatic selection based on the gain and readout rate.",
        }),
        ("readoutRate", {}),
        ("verticalReadoutRate", {
            "tooltip": "NoneHz means automatically picks the fastest recommended clock."
        }),
        ("verticalClockVoltage", {
            "tooltip": "At higher vertical readout rate, voltage must be increased, \n"
                       "but it might introduce extra noise. 0 means standard voltage.",
        }),
        ("temperature", {}),
        ("filename", {
            "tooltip": "Each acquisition will be saved with the name and the number appended.",
            "control_type": gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("expectedDuration", {
        }),
    ))

    def __init__(self, microscope, main_app):
        super(SRAcqPlugin, self).__init__(microscope, main_app)
        # Can only be used with a microscope
        if not microscope:
            return

        # Check if the microscope is a SECOM
        main_data = self.main_app.main_data
        if not main_data.ccd or not main_data.light:
            return
        self.light = main_data.light
        self.ccd = main_data.ccd

        self.addMenu("Acquisition/Super-resolution...", self.start)

        # Add the useful VAs which are available on the CCD.
        # (on an iXon, they should all be there)
        for n in ("exposureTime", "resolution", "binning", "gain", "emGain",
                  "countConvertWavelength", "temperature",
                  "readoutRate", "verticalReadoutRate", "verticalClockVoltage"):
            if model.hasVA(self.ccd, n):
                va = getattr(self.ccd, n)
                setattr(self, n, va)

        # Trick to pass the component (ccd) to binning_1d_from_2d()
        self.vaconf["binning"]["choices"] = (lambda cp, va, cf:
                       gui.conf.util.binning_1d_from_2d(self.ccd, va, cf))
        self.vaconf["resolution"]["choices"] = (lambda cp, va, cf:
                       gui.conf.util.resolution_from_range(self.ccd, va, cf))

        self.number = model.IntContinuous(1000, (1, 1000000))

        self.filename = model.StringVA("a.tiff")
        self.filename.subscribe(self._on_filename)

        self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)
        self.number.subscribe(self._update_exp_dur)
        self.exposureTime.subscribe(self._update_exp_dur)

        # Create a stream to show the settings changes
        self._stream = stream.FluoStream(
            "Filtered colour",
            self.ccd,
            self.ccd.data,
            emitter=main_data.light,
            em_filter=main_data.light_filter,
            focuser=main_data.focus,
        )

        # For the acquisition
        self._acq_done = threading.Event()
        self._n = 0
        self._startt = 0  # starting time of acquisition
        self._last_display = 0  # last time the GUI image was updated
        self._future = None  # future to represent the acquisition progress
        self._exporter = None  # to save the file

        self._q = queue.Queue()  # queue of tuples (int, DataArray) for saving data
        self._qdisplay = queue.Queue()
        # TODO: find the right number of threads, based on CPU numbers (but with
        # python threading that might be a bit overkill)
        for i in range(4):
            t = threading.Thread(target=self._saving_thread, args=(i,))
            t.daemon = True
            t.start()

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("sr-%Y%m%d-%H%M%S"), ".tiff")
        )

    def _on_filename(self, fn):
        # Make the name "fn" -> "fn-XXXXXX.ext"
        bn, ext = os.path.splitext(fn)
        self._fntmpl = bn + "-%06d" + ext
        if not ext.endswith(".tiff"):
            logging.warning("Only TIFF format is recommended to use")

        # Store the directory so that next filename is in the same place
        conf = get_acqui_conf()
        p, bn = os.path.split(fn)
        if p:
            conf.last_path = p

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        # On the Andor iXon, in frame transfer mode, the readout is done while
        # the next frame is exposed. So only exposure time counts
        tott = self.exposureTime.value * self.number.value + 0.1

        # Use _set_value as it's read only
        self.expectedDuration._set_value(tott, force_write=True)

    def start(self):
        """
        Called when the menu entry is selected
        """
        main_data = self.main_app.main_data

        # Stop the streams
        tab_data = main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            s.should_update.value = False

        self.filename.value = self._get_new_filename()
        self._update_exp_dur()

        # Special CCD settings to get values as photon counting
        if model.hasVA(self.ccd, "countConvert"):
            self.ccd.countConvert.value = 2  # photons

        dlg = AcquisitionDialog(self, "Super-resolution acquisition",
                                "Acquires a series of shortly exposed images, "
                                "and store them in sequence.\n"
                                "Note, the advanced settings are only applied "
                                "after restarting the stream.")
        dlg.addStream(self._stream)
        dlg.addSettings(self, self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')
        dlg.Maximize()
        ans = dlg.ShowModal()

        # Make sure the stream is not playing anymore and CCD is back to normal
        self._stream.should_update.value = False
        if model.hasVA(self.ccd, "countConvert"):
            try:
                self.ccd.countConvert.value = 0  # normal
            except Exception:
                logging.exception("Failed to set back count convert mode")

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        dlg.Destroy()

    def acquire(self, dlg):
        # Make sure the stream is not playing
        self._stream.should_update.value = False

        self._exporter = dataio.find_fittest_converter(self.filename.value)

        nb = self.number.value
        self._n = 0
        self._acq_done.clear()

        self._startt = time.time()
        self._last_display = self._startt
        end = self._startt + self.expectedDuration.value

        f = model.ProgressiveFuture(end=end)
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        self._future = f
        dlg.showProgress(f)

        try:
            # Special CCD settings to get values as photon counting
            if model.hasVA(self.ccd, "countConvert"):
                self.ccd.countConvert.value = 2  # photons

            # Switch on laser (at the right wavelength and power)
            self._stream._setup_emission()
            self._stream._setup_excitation()

            # Let it start!
            self.ccd.data.subscribe(self._on_image)

            # Wait for the complete acquisition to be done
            while not self._acq_done.wait(1):
                # Update the progress bar
                left = nb - self._n
                dur = self.exposureTime.value * left + 0.1
                f.set_progress(end=time.time() + dur)

                # Update the image
                try:
                    da = self._qdisplay.get(block=False)
                    # Hack: we pretend the stream has received an image it was
                    # subscribed to (although it's paused)
                    self._stream._onNewData(None, da)
                except queue.Empty:
                    pass

            logging.info("Waiting for all data to be saved")
            dur = self._q.qsize() * 0.1  # very pessimistic
            f.set_progress(end=time.time() + dur)
            self._q.join()

            if f.cancelled():
                logging.debug("Acquisition cancelled")
                return
        except Exception as ex:
            self.ccd.data.unsubscribe(self._on_image)
            # TODO: write this in the window
            logging.exception("Failure during SR acquisition")
            f.set_exception(ex)
            return
        finally:
            # Revert CCD count to normal behaviour
            if model.hasVA(self.ccd, "countConvert"):
                try:
                    self.ccd.countConvert.value = 0  # normal
                except Exception:
                    logging.exception("Failed to set back count convert mode")

        f.set_result(None)  # Indicate it's over
        fps = nb / (time.time() - self._startt)
        logging.info("Finished with average %g fps", fps)

        dlg.Close()

    def _on_image(self, df, data):
        """
        Called for each new image
        """
        try:
            self._n += 1
            self._q.put((self._n, data))
            now = time.time()
            fps = self._n / (now - self._startt)
            logging.info("Received data %d (%g fps), queue size = %d",
                         self._n, fps, self._q.qsize())

            if self._q.qsize() > 8:
                logging.warning("Saving queue is behind acquisition")
            # TODO: if queue size too long => pause until it's all processed

            if self._future.cancelled():
                logging.info("Stopping early due to cancellation")
                self.ccd.data.unsubscribe(self._on_image)
                self._acq_done.set()  # indicate it's over
                return

            if now > self._last_display + LIVE_UPDATE_PERIOD:
                if not self._qdisplay.qsize():
                    self._qdisplay.put(data)
                else:
                    logging.debug("Not pushing new image to display as previous one hasn't been processed")

            if self._n == self.number.value:
                self.ccd.data.unsubscribe(self._on_image)
                self._acq_done.set()  # indicate it's over
        except Exception as ex:
            logging.exception("Failure to save acquisition %d", self._n)
            self._future.set_exception(ex)
            self.ccd.data.unsubscribe(self._on_image)
            self._acq_done.set()  # indicate it's over

    def _saving_thread(self, i):
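        # Worker (daemon) thread: takes (index, DataArray) tuples from the queue and
        # exports each one to its own numbered file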
        try:
            while True:
                n, da = self._q.get()
                logging.info("Saving data %d in thread %d", n, i)
                filename = self._fntmpl % (n,)
                try:
                    self._exporter.export(filename, da, compressed=True)
                except Exception:
                    logging.exception("Failed to store data %d", n)
                self._q.task_done()
                logging.debug("Data %d saved", n)
        except Exception:
            logging.exception("Failure in the saving thread")
Example #8
0
class CLAcqPlugin(Plugin):
    """
    This is a script to acquire a set of optical images from the detector (ccd) for
    various e-beam spot positions on the sample, along a grid. It can also be used
    as a plugin.
    """
    name = "CL acquisition for SECOM"
    __version__ = "2.0"
    __author__ = u"Éric Piel, Lennard Voortman, Sabrina Rossberger"
    __license__ = "Public domain"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("repetition", {}),
        ("pixelSize", {}),
        ("exposureTime", {
            "range": (1e-6, 180),
            "scale": "log",
        }),
        ("binning", {
            "control_type": gui.CONTROL_RADIO,
        }),
        ("roi_margin", {
            "label": "ROI margin",
            "tooltip": "Extra space around the SEM area to store on the CCD"
        }),
        ("filename", {
            "control_type": gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards({tiff.FORMAT:
                                              tiff.EXTENSIONS})[0],
        }),
        ("period", {
            "label": "Drift corr. period",
            "tooltip": u"Maximum time after running a drift correction (anchor region acquisition)",
            "control_type": gui.CONTROL_SLIDER,
            "scale": "log",
            "range": (1, 300),  # s, the VA allows a wider range, not typically needed
            "accuracy": 2,
        }),
        ("tool", {
            "label": "Selection tools",
            "control_type": gui.CONTROL_RADIO,
            "choices": {
                TOOL_NONE: u"drag",
                TOOL_ROA: u"ROA",
                TOOL_RO_ANCHOR: u"drift"
            },
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        """
        :param microscope: (Microscope or None) The main back-end component.
        :param main_app: (wx.App) The main GUI component.
        """
        super(CLAcqPlugin, self).__init__(microscope, main_app)

        # Can only be used with a microscope
        if not microscope:
            return
        else:
            # Check which stream the microscope supports
            self.main_data = self.main_app.main_data
            if not (self.main_data.ccd and self.main_data.ebeam):
                return

        self.exposureTime = self.main_data.ccd.exposureTime
        self.binning = self.main_data.ccd.binning
        # Trick to pass the component (ccd) to binning_1d_from_2d()
        self.vaconf["binning"]["choices"] = (
            lambda cp, va, cf: cutil.binning_1d_from_2d(
                self.main_data.ccd, va, cf))

        self._survey_stream = None
        self._optical_stream = acqstream.BrightfieldStream(
            "Optical",
            self.main_data.ccd,
            self.main_data.ccd.data,
            emitter=None,
            focuser=self.main_data.focus)
        self._secom_cl_stream = SECOMCLSettingsStream("Secom-CL",
                                                      self.main_data.ccd,
                                                      self.main_data.ccd.data,
                                                      self.main_data.ebeam)
        self._sem_stream = acqstream.SEMStream(
            "Secondary electrons concurrent", self.main_data.sed,
            self.main_data.sed.data, self.main_data.ebeam)

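        # Combined (multiple-detector) stream: the secondary-electron and CL data are
        # acquired together, one CCD image per e-beam position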
        self._secom_sem_cl_stream = SECOMCLSEMMDStream(
            "SECOM SEM CL", [self._sem_stream, self._secom_cl_stream])

        self._driftCorrector = leech.AnchorDriftCorrector(
            self.main_data.ebeam, self.main_data.sed)

        self.conf = get_acqui_conf()
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)
        self.exposureTime.subscribe(self._update_exp_dur)

        self.filename = self._secom_sem_cl_stream.filename  # duplicate VA
        self.filename.subscribe(self._on_filename)

        self.addMenu("Acquisition/CL acquisition...", self.start)

    def _on_filename(self, fn):
        """
        Store path and pattern in conf file.
        :param fn: (str) The filename to be stored.
        """
        # Store the directory so that next filename is in the same place
        p, bn = os.path.split(fn)
        if p:
            self.conf.last_path = p

        # Save pattern
        self.conf.fn_ptn, self.conf.fn_count = guess_pattern(fn)

    def _update_filename(self):
        """
        Set filename from pattern in conf file.
        """
        fn = create_filename(self.conf.last_path, self.conf.fn_ptn, '.tiff',
                             self.conf.fn_count)
        self.conf.fn_count = update_counter(self.conf.fn_count)

        # Update the widget, without updating the pattern and counter again
        self.filename.unsubscribe(self._on_filename)
        self.filename.value = fn
        self.filename.subscribe(self._on_filename)

    def _get_sem_survey(self):
        """
        Finds the SEM stream in the acquisition tab.
        :returns: (SEMStream or None) None if not found.
        """
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            if isinstance(s, acqstream.SEMStream):
                return s

        logging.warning("No SEM stream found")
        return None

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed.
        """
        if self._survey_stream is None:
            return

        strs = [self._survey_stream, self._secom_sem_cl_stream]

        dur = acqmng.estimateTime(strs)
        logging.debug("Estimating %g s acquisition for %d streams", dur,
                      len(strs))
        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(dur), force_write=True)

    def _on_dc_roi(self, roi):
        """
        Called when the Anchor region changes.
        Used to enable/disable the drift correction period control.
        :param roi: (4 x 0<=float<=1) The anchor region selected (tlbr).
        """
        enabled = (roi != acqstream.UNDEFINED_ROI)

        # The driftCorrector should be a leech if drift correction is enabled
        dc = self._driftCorrector
        if enabled:
            if dc not in self._sem_stream.leeches:
                self._sem_stream.leeches.append(dc)
        else:
            try:
                self._sem_stream.leeches.remove(dc)
            except ValueError:
                pass  # It was already not there

    @call_in_wx_main
    def _on_rep(self, rep):
        """
        Force the ROI in the canvas to show the e-beam positions.
        :param rep: (int, int) The repetition (e-beam positions) to be displayed.
        """
        self._dlg.viewport_l.canvas.show_repetition(
            rep, RepetitionSelectOverlay.FILL_POINT)

    def start(self):
        """
        Displays the plugin window.
        """
        self._update_filename()
        str_ctrl = self.main_app.main_data.tab.value.streambar_controller
        str_ctrl.pauseStreams()

        dlg = AcquisitionDialog(
            self, "CL acquisition",
            "Acquires a CCD image for each e-beam spot.\n")
        self._dlg = dlg
        self._survey_stream = self._get_sem_survey()

        dlg.SetSize((1500, 1000))

        # Hack to force the canvas to have a region of acquisition (ROA) and anchor region (drift) overlay.
        dlg._dmodel.tool.choices = {
            TOOL_NONE,
            TOOL_ROA,
            TOOL_RO_ANCHOR,
        }

        dlg._dmodel.roa = self._secom_cl_stream.roi  # region of acquisition selected (x_tl, y_tl, x_br, y_br)
        dlg._dmodel.fovComp = self.main_data.ebeam  # size (x, y) of sem image for given magnification
        dlg._dmodel.driftCorrector = self._driftCorrector
        dlg.viewport_l.canvas.view = None
        dlg.viewport_l.canvas.setView(dlg.view, dlg._dmodel)
        dlg.viewport_r.canvas.allowed_modes = {}
        dlg.viewport_r.canvas.view = None
        dlg.viewport_r.canvas.setView(dlg.view_r, dlg._dmodel)

        self.repetition = self._secom_cl_stream.repetition  # ebeam positions to acquire
        self.repetition.subscribe(self._on_rep, init=True)
        self.pixelSize = self._secom_cl_stream.pixelSize  # pixel size per ebeam pos
        self.roi_margin = self._secom_cl_stream.roi_margin
        self.period = self._driftCorrector.period  # time between two drift corrections
        self.tool = dlg._dmodel.tool  # tools to select ROA and anchor region for drift correction
        self._driftCorrector.roi.subscribe(self._on_dc_roi, init=True)

        # subscribe to update estimated acquisition time
        self.repetition.subscribe(self._update_exp_dur, init=True)
        self.period.subscribe(self._update_exp_dur)
        self._driftCorrector.roi.subscribe(self._update_exp_dur)

        dlg.addSettings(self, self.vaconf)
        dlg.addStream(self._survey_stream)
        dlg.addStream(self._optical_stream)

        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self._acquire, face_colour='blue')

        ans = dlg.ShowModal()

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        self._dlg = None
        self._survey_stream = None
        dlg.Destroy()

    def save_hw_settings(self):
        """
        Saves the current e-beam settings (only e-beam!).
        """
        res = self.main_data.ebeam.resolution.value
        scale = self.main_data.ebeam.scale.value
        trans = self.main_data.ebeam.translation.value
        dt = self.main_data.ebeam.dwellTime.value
        self._hw_settings = (res, scale, trans, dt)

    def resume_hw_settings(self):
        """
        Restores the saved e-beam settings.
        """
        res, scale, trans, dt = self._hw_settings

        # order matters! The scale limits the available resolution, so set it first.
        self.main_data.ebeam.scale.value = scale
        self.main_data.ebeam.resolution.value = res
        self.main_data.ebeam.translation.value = trans
        self.main_data.ebeam.dwellTime.value = dt

    def _acquire(self, dlg):
        """
        Starts the synchronized acquisition, pauses the currently playing streams and exports the
        acquired SEM data. Opens the survey, concurrent and first optical image in the analysis tab.
        :param dlg: (AcquisitionDialog) The plugin window.
        """
        self._dlg.streambar_controller.pauseStreams()
        self.save_hw_settings()

        self.fns = []

        strs = [self._survey_stream, self._secom_sem_cl_stream]

        fn = self.filename.value
        fn_prefix, fn_ext = os.path.splitext(self.filename.value)

        try:
            f = acqmng.acquire(strs, self.main_app.main_data.settings_obs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass
        finally:
            self.resume_hw_settings()

        if not f.cancelled() and das:
            if e:
                logging.warning("SECOM CL acquisition failed: %s", e)
            logging.debug("Will save CL data to %s", fn)

            # export the SEM images
            self.save_data(das,
                           prefix=fn_prefix,
                           xres=self.repetition.value[0],
                           yres=self.repetition.value[1],
                           xstepsize=self.pixelSize.value[0] * 1e9,
                           ystepsize=self.pixelSize.value[1] * 1e9,
                           idx=0)

            # Open analysis tab, with 3 files
            self.showAcquisition(self._secom_sem_cl_stream.firstOptImg)
            analysis_tab = self.main_data.getTabByName('analysis')
            for fn_img in self.fns:
                analysis_tab.load_data(fn_img, extend=True)

        dlg.Close()

    def save_data(self, data, **kwargs):
        """
        Saves the data into a file.
        :param data: (model.DataArray or list of model.DataArray) The data to save.
        :param kwargs: (dict (str->value)) Values to substitute in the file name.
        """
        # export to single tiff files
        exporter = dataio.get_converter(FMT)

        # Only care about the SEM images; the optical images are already saved
        for d in data[:2]:
            if d.metadata.get(model.MD_DESCRIPTION) == "Anchor region":
                kwargs["type"] = "drift"
            elif d.metadata.get(
                    model.MD_DESCRIPTION) == "Secondary electrons concurrent":
                kwargs["type"] = "concurrent"
            else:
                kwargs["type"] = "survey"

            kwargs["xpos"] = 0
            kwargs["ypos"] = 0
            fn = FN_FMT % kwargs

            # The data is normally ordered: survey, concurrent, drift
            # => first 2 files are the ones we care about
            if kwargs["idx"] < 2:
                self.fns.append(fn)

            if os.path.exists(fn):
                # mostly to warn if multiple ypos/xpos are rounded to the same value
                logging.warning("Overwriting file '%s'.", fn)
            else:
                logging.info("Saving file '%s", fn)

            exporter.export(fn, d)
            kwargs["idx"] += 1
Example #9
0
class QuickCLPlugin(Plugin):
    name = "Quick CL"
    __version__ = "1.1"
    __author__ = u"Éric Piel"
    __license__ = "GPLv2"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("filename", {
            "tooltip":
            "Each acquisition will be saved with the name and the number appended.",
            "control_type": gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards({png.FORMAT: png.EXTENSIONS})[0],
        }),
        ("hasDatabar", {
            "label": "Include data-bar",
        }),
        ("logScale", {
            "label": "Logarithmic scale",
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        super(QuickCLPlugin, self).__init__(microscope, main_app)
        # Can only be used with a SPARC with CL detector (or monochromator)
        if not microscope:
            return
        main_data = self.main_app.main_data
        if not main_data.ebeam or not (main_data.cld
                                       or main_data.monochromator):
            return

        self.conf = get_acqui_conf()
        self.filename = model.StringVA("")
        self.filename.subscribe(self._on_filename)

        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)

        self.hasDatabar = model.BooleanVA(False)

        # Only put the VAs that do directly define the image as local, everything
        # else should be global. The advantage is double: the global VAs will
        # set the hardware even if another stream (also using the e-beam) is
        # currently playing, and if the VAs are changed externally, the settings
        # will be displayed correctly (and not reset the values on next play).
        emtvas = set()
        hwemtvas = set()
        for vaname in get_local_vas(main_data.ebeam,
                                    main_data.hw_settings_config):
            if vaname in ("resolution", "dwellTime", "scale"):
                emtvas.add(vaname)
            else:
                hwemtvas.add(vaname)

        self._sem_stream = stream.SEMStream(
            "Secondary electrons",
            main_data.sed,
            main_data.sed.data,
            main_data.ebeam,
            focuser=main_data.ebeam_focus,
            hwemtvas=hwemtvas,
            hwdetvas=None,
            emtvas=emtvas,
            detvas=get_local_vas(main_data.sed, main_data.hw_settings_config),
        )

        # This stream is used both for rendering and acquisition.
        # LiveCLStream is more or less like a SEMStream, but ensures the icon in
        # the merge slider is correct, and provides a few extras.
        if main_data.cld:
            self._cl_stream = LiveCLStream(
                "CL intensity",
                main_data.cld,
                main_data.cld.data,
                main_data.ebeam,
                focuser=main_data.ebeam_focus,
                emtvas=emtvas,
                detvas=get_local_vas(main_data.cld,
                                     main_data.hw_settings_config),
                opm=main_data.opm,
            )
            # TODO: allow to type in the resolution of the CL?
            # TODO: add the cl-filter axis (or reset it to pass-through?)
            self.logScale = self._cl_stream.logScale

            if hasattr(self._cl_stream, "detGain"):
                self._cl_stream.detGain.subscribe(self._on_cl_gain)

            # Update the acquisition time when it might change (ie, the scan settings
            # change)
            self._cl_stream.emtDwellTime.subscribe(self._update_exp_dur)
            self._cl_stream.emtResolution.subscribe(self._update_exp_dur)

        # Note: for now we don't really support SPARC with BOTH CL-detector and
        # monochromator.
        if main_data.monochromator:
            self._mn_stream = LiveCLStream(
                "Monochromator",
                main_data.monochromator,
                main_data.monochromator.data,
                main_data.ebeam,
                focuser=main_data.ebeam_focus,
                emtvas=emtvas,
                detvas=get_local_vas(main_data.monochromator,
                                     main_data.hw_settings_config),
                opm=main_data.opm,
            )
            self._mn_stream.emtDwellTime.subscribe(self._update_exp_dur)
            self._mn_stream.emtResolution.subscribe(self._update_exp_dur)

            # spg = self._getAffectingSpectrograph(main_data.spectrometer)
            # TODO: show axes

        self._dlg = None

        self.addMenu("Acquisition/Quick CL...\tF2", self.start)

    def _show_axes(self, sctrl, axes, sclass):
        """
        Show axes in settings panel for a given stream.
        sctrl (StreamController): stream controller
        axes (str -> comp): list of axes to display
        sclass (Stream): stream class of (settings) stream
        """
        stream_configs = get_stream_settings_config()
        stream_config = stream_configs.get(sclass, {})

        # Add Axes (in same order as config)
        axes_names = util.sorted_according_to(axes.keys(),
                                              list(stream_config.keys()))
        for axisname in axes_names:
            comp = axes[axisname]
            if comp is None:
                logging.debug("Skipping axis %s for non existent component",
                              axisname)
                continue
            if axisname not in comp.axes:
                logging.debug("Skipping non existent axis %s on component %s",
                              axisname, comp.name)
                continue
            conf = stream_config.get(axisname)
            sctrl.add_axis_entry(axisname, comp, conf)

    def _getAffectingSpectrograph(self, comp):
        """
        Find which spectrograph matters for the given component (ex, spectrometer)
        comp (Component): the hardware which is affected by a spectrograph
        return (None or Component): the spectrograph affecting the component
        """
        cname = comp.name
        main_data = self.main_app.main_data
        for spg in (main_data.spectrograph, main_data.spectrograph_ded):
            if spg is not None and cname in spg.affects.value:
                return spg
        else:
            logging.warning("No spectrograph found affecting component %s",
                            cname)
            # spg should be None, but in case it's an error in the microscope file
            # and actually, there is a spectrograph, then use that one
            return main_data.spectrograph

    def _update_filename(self):
        """
        Set filename from pattern in conf file
        """
        fn = create_filename(self.conf.last_path, self.conf.fn_ptn, '.png',
                             self.conf.fn_count)
        self.conf.fn_count = update_counter(self.conf.fn_count)

        # Update the widget, without updating the pattern and counter again
        self.filename.unsubscribe(self._on_filename)
        self.filename.value = fn
        self.filename.subscribe(self._on_filename)

    def _on_filename(self, fn):
        """
        Warn if extension not .png, store path and pattern in conf file
        """
        bn, ext = splitext(fn)
        if not ext.endswith(".png") and not ALLOW_SAVE:
            logging.warning("Only PNG format is recommended to use")

        # Store the directory so that next filename is in the same place
        p, bn = os.path.split(fn)
        if p:
            self.conf.last_path = p

        # Save pattern
        self.conf.fn_ptn, self.conf.fn_count = guess_pattern(fn)

    def _get_acq_streams(self):
        ss = []
        if hasattr(self, "_cl_stream"):
            ss.append(self._cl_stream)
        if hasattr(self, "_mn_stream"):
            ss.append(self._mn_stream)

        return ss

    def _update_exp_dur(self, _=None):
        """
        Shows how long the CL takes to acquire
        """
        tott = sum(s.estimateAcquisitionTime()
                   for s in self._get_acq_streams())
        tott = math.ceil(tott)  # round-up to 1s

        # Use _set_value as it's read only
        self.expectedDuration._set_value(tott, force_write=True)

    def _on_cl_gain(self, g):
        # This works around an annoyance on the current hardware/GUI:
        # the histogram range can only increase. However, for now the hardware
        # sends data in a small range, but at different value depending on the
        # gain. This causes the range to rapidly grow when changing the gain,
        # but once the actual data range is stable, it looks tiny on the whole
        # histogram. => Force resizing when changing gain.
        self._cl_stream._drange_unreliable = False
        logging.debug("Set the drange back to unreliable")

    # keycode to FoV ratio: 0.9 ~= 90% of the screen
    _key_to_move = {
        wx.WXK_LEFT: (-0.9, 0),
        wx.WXK_RIGHT: (0.9, 0),
        wx.WXK_UP: (0, 0.9),
        wx.WXK_DOWN: (0, -0.9),
    }

    def on_char(self, evt):
        key = evt.GetKeyCode()

        if (canvas.CAN_DRAG in self._canvas.abilities
                and key in self._key_to_move):
            move = self._key_to_move[key]
            if evt.ShiftDown():  # softer
                move = tuple(s / 8 for s in move)

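            # Compute the field of view: prefer the hardware FoV (horizontal value plus
            # the aspect ratio from the component shape), otherwise use the view's FoV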
            if self._dlg.view.fov_hw:
                fov_x = self._dlg.view.fov_hw.horizontalFoV.value
                shape = self._dlg.view.fov_hw.shape
                fov = (fov_x, fov_x * shape[1] / shape[0])
            else:
                fov = self._dlg.view.fov.value
            shift = [m * f for m, f in zip(move, fov)]
            self._dlg.view.moveStageBy(shift)

            # We "eat" the event, so the canvas will never react to it
        else:
            evt.Skip()  # Pretend we never got here in the first place

    def start(self):
        """
        Called when the menu entry is selected
        """
        main_data = self.main_app.main_data

        # Stop the streams of the active tab
        tab_data = main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            s.should_update.value = False

        # First time, create a proper filename
        if not self.filename.value:
            self._update_filename()
        self._update_exp_dur()

        # immediately switch optical path, to save time
        main_data.opm.setPath(self._get_acq_streams()[0])  # non-blocking

        # Add connection to SEM hFoV if possible
        fov_hw = None
        if main_data.ebeamControlsMag:
            fov_hw = main_data.ebeam
        dlg = ContentAcquisitionDialog(self,
                                       "Cathodoluminecense acquisition",
                                       stage=main_data.stage,
                                       fov_hw=fov_hw)
        self._dlg = dlg
        # Listen to the key events, to move the stage by 90% of the FoV when
        # pressing the arrow keys (instead of 100px).
        # Note: this only matters when the view is in focus
        # TODO: make it like the alignment tab, available everywhere
        if main_data.stage:
            self._canvas = dlg.viewport_l.canvas
            self._canvas.Bind(wx.EVT_CHAR, self.on_char)

        if fov_hw:
            dlg.viewport_l.canvas.fit_view_to_next_image = False

        # Use pass-through filter by default
        if main_data.cl_filter and "band" in main_data.cl_filter.axes:
            # find the "pass-through"
            bdef = main_data.cl_filter.axes["band"]
            for b, bn in bdef.choices.items():
                if bn == "pass-through":
                    main_data.cl_filter.moveAbs({"band": b})
                    break
            else:
                logging.debug("Pass-through not found in the CL-filter")

        dlg.addStream(self._sem_stream)
        for s in self._get_acq_streams():
            dlg.addStream(s)

        self._setup_sbar_cont()
        dlg.addSettings(self, self.vaconf)
        if ALLOW_SAVE:
            dlg.addButton("Save", self.save, face_colour='blue')
        dlg.addButton("Export", self.export, face_colour='blue')

        dlg.Maximize()
        dlg.ShowModal()

        # Window is closed

        # Make sure the streams are not playing anymore
        dlg.streambar_controller.pauseStreams()
        dlg.Destroy()
        self._dlg = None

        # Update filename in main window
        tab_acqui = main_data.getTabByName("sparc_acqui")
        tab_acqui.acquisition_controller.update_fn_suggestion()

    @call_in_wx_main
    def _setup_sbar_cont(self):
        # The following code needs to be run asynchronously to make sure the streams are added to
        # the streambar controller first in .addStream.
        main_data = self.main_app.main_data
        sconts = self._dlg.streambar_controller.stream_controllers

        # Add axes to monochromator and cl streams
        if hasattr(self, "_mn_stream"):
            spg = self._getAffectingSpectrograph(main_data.monochromator)
            axes = {
                "wavelength": spg,
                "grating": spg,
                "slit-in": spg,
                "slit-monochromator": spg,
            }
            scont = [
                sc for sc in sconts
                if sc.stream.detector is main_data.monochromator
            ][0]
            self._show_axes(scont, axes, MonochromatorSettingsStream)
        if hasattr(self, "_cl_stream"):
            axes = {"band": main_data.cl_filter}
            scont = [
                sc for sc in sconts if sc.stream.detector is main_data.cld
            ][0]
            self._show_axes(scont, axes, CLSettingsStream)

        # Don't allow removing the streams
        for sctrl in sconts:
            sctrl.stream_panel.show_remove_btn(False)

    def _acq_canceller(self, future):
        return future._cur_f.cancel()

    def _acquire(self, dlg, future):
        # Stop the streams
        dlg.streambar_controller.pauseStreams()

        # Acquire (even if it was live, to be sure the data is up-to-date)
        ss = self._get_acq_streams()
        dur = acqmng.estimateTime(ss)
        startt = time.time()
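        # Placeholder sub-future, so that a cancellation request arriving before the
        # real acquisition starts still has something to cancel (cf _acq_canceller)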
        future._cur_f = InstantaneousFuture()
        future.task_canceller = self._acq_canceller
        future.set_running_or_notify_cancel()  # Indicate the work is starting now
        future.set_progress(end=startt + dur)
        dlg.showProgress(future)

        future._cur_f = acqmng.acquire(ss,
                                       self.main_app.main_data.settings_obs)
        das, e = future._cur_f.result()
        if future.cancelled():
            raise CancelledError()

        if e:
            raise e

        return das

    def export(self, dlg):
        """
        Stores the current CL data into a PNG file
        """
        f = model.ProgressiveFuture()

        # Note: the user never needs to store the raw data or the SEM data
        try:
            das = self._acquire(dlg, f)
        except CancelledError:
            logging.debug("Stopping acquisition + export, as it was cancelled")
            return
        except Exception as e:
            logging.exception("Failed to acquire CL data: %s", e)
            return

        exporter = dataio.find_fittest_converter(self.filename.value,
                                                 allowlossy=True)

        ss = self._get_acq_streams()
        for s in ss:
            if len(ss) > 1:
                # Add a -StreamName after the filename
                bn, ext = splitext(self.filename.value)
                fn = bn + "-" + s.name.value + ext
            else:
                fn = self.filename.value

            # We actually don't care about the DAs, and will get the corresponding
            # .image, as it has been projected to RGB.
            rgbi = s.image.value
            try:
                while rgbi.metadata[model.MD_ACQ_DATE] < s.raw[0].metadata[
                        model.MD_ACQ_DATE]:
                    logging.debug("Waiting a for the RGB projection")
                    time.sleep(1)
                    rgbi = s.image.value
            except KeyError:
                # No date to check => let's hope it's fine
                pass

            try:
                if self.hasDatabar.value:
                    # Use MPP and FoV so that the whole image is displayed, at 1:1
                    view_pos = rgbi.metadata[model.MD_POS]
                    pxs = rgbi.metadata[model.MD_PIXEL_SIZE]
                    # Shape is YXC
                    view_hfw = rgbi.shape[1] * pxs[0], rgbi.shape[0] * pxs[1]
                    exdata = img.images_to_export_data(
                        [s],
                        view_hfw,
                        view_pos,
                        draw_merge_ratio=1.0,
                        raw=False,
                        interpolate_data=False,
                        logo=self.main_app.main_frame.legend_logo)
                else:
                    exdata = rgbi

                exporter.export(fn, exdata)
            except Exception:
                logging.exception("Failed to store data in %s", fn)

        f.set_result(None)  # Indicate it's over
        self._update_filename()

    def save(self, dlg):
        """
        Stores the current CL data into a TIFF/HDF5 file
        """
        f = model.ProgressiveFuture()

        try:
            das = self._acquire(dlg, f)
        except CancelledError:
            logging.debug("Stopping acquisition + export, as it was cancelled")
            return
        except Exception as e:
            logging.exception("Failed to acquire CL data: %s", e)
            return

        fn = self.filename.value
        bn, ext = splitext(fn)
        if ext == ".png":
            logging.debug("Using HDF5 instead of PNG")
            fn = bn + ".h5"
        exporter = dataio.find_fittest_converter(fn)

        try:
            exporter.export(fn, das)
        except Exception:
            logging.exception("Failed to store data in %s", fn)

        f.set_result(None)  # Indicate it's over
        self._update_filename()
Example #10
0
class ZStackPlugin(Plugin):
    name = "Z Stack"
    __version__ = "1.4"
    __author__ = u"Anders Muskens, Éric Piel"
    __license__ = "GPLv2"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("filename", {
            "control_type": odemis.gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("zstep", {
            "control_type": odemis.gui.CONTROL_FLT,
        }),
        ("zstart", {
            "control_type": odemis.gui.CONTROL_FLT,
        }),
        ("numberOfAcquisitions", {
            "control_type": odemis.gui.CONTROL_INT,  # no slider
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        super(ZStackPlugin, self).__init__(microscope, main_app)
        # Can only be used with a microscope
        main_data = self.main_app.main_data
        
        if not microscope or main_data.focus is None:
            return

        self.focus = main_data.focus
        self._zrange = self.focus.axes['z'].range
        zunit = self.focus.axes['z'].unit
        self._old_pos = self.focus.position.value
        z = max(self._zrange[0], min(self._old_pos['z'], self._zrange[1]))
        self.zstart = model.FloatContinuous(z, range=self._zrange, unit=zunit)
        self.zstep = model.FloatContinuous(1e-6, range=(-1e-5, 1e-5), unit=zunit, setter=self._setZStep)
        self.numberOfAcquisitions = model.IntContinuous(3, (2, 999), setter=self._setNumberOfAcquisitions)

        self.filename = model.StringVA("a.h5")
        self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)

        self.zstep.subscribe(self._update_exp_dur)
        self.numberOfAcquisitions.subscribe(self._update_exp_dur)
        
        # Two acquisition orders are possible:
        # * for each Z, all the streams (aka intertwined): Z is exactly the same for each stream
        # * for each stream, the whole Z stack: might be faster (e.g. if a filter wheel is used for changing the wavelength)
        self._streams_intertwined = True
        if main_data.light_filter and len(main_data.light_filter.axes["band"].choices) > 1:
            logging.info("Filter-wheel detected, Z-stack will be acquired stream-per-stream")
            self._streams_intertwined = False

        self._acq_streams = None  # previously folded streams, for optimisation
        self._dlg = None
        self.addMenu("Acquisition/ZStack...\tCtrl+B", self.start)
        
    def _acqRangeIsValid(self, acq_range):
        return self._zrange[0] <= acq_range <= self._zrange[1]

    def _setZStep(self, zstep):
        # Check if the acquisition will be within the range of the actuator
        acq_range = self.zstart.value + zstep * self.numberOfAcquisitions.value
        if self._acqRangeIsValid(acq_range):
            return zstep
        else:
            return self.zstep.value  # Old value
        
    def _setNumberOfAcquisitions(self, n_acq):
        # Check if the acquisition will be within the range of the actuator
        acq_range = self.zstart.value + self.zstep.value * n_acq
        if self._acqRangeIsValid(acq_range):
            return n_acq
        else:
            return self.numberOfAcquisitions.value  # Old value
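
    # A worked example of the range check (hypothetical numbers, not from the
    # original source): with zstart = 10 µm, zstep = 1 µm and a request for
    # 20 acquisitions, acq_range = 30 µm; if the focus range only goes up to
    # 25 µm, the new value is rejected and the previous one is kept.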

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension)
        )
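
    # A hypothetical example of the generated name (config values assumed, not
    # from the original source): with last_path == "/home/user/Pictures" and
    # last_extension == ".h5", this returns something like
    # "/home/user/Pictures/20240101-120000.h5".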

    def _estimate_step_duration(self):
        """
        return (float > 0): estimated time (in s) that it takes to move the focus
          by one step.
        """
        speed = None
        if model.hasVA(self.focus, "speed"):
            speed = self.focus.speed.value.get('z', None)
        if speed is None:
            speed = 10e-6  # m/s, pessimistic

        return driver.estimateMoveDuration(abs(self.zstep.value), speed, 0.01)
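
    # A minimal worked example, assuming estimateMoveDuration(distance, speed, accel)
    # returns roughly distance/speed plus an acceleration ramp: with the default
    # 1 µm step and the pessimistic 10 µm/s fallback speed, one step is estimated
    # at about 0.1 s.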

    def _update_exp_dur(self, _=None):
        """
        Called when a VA that affects the expected duration is changed
        """
        nsteps = self.numberOfAcquisitions.value
        step_time = self._estimate_step_duration()
        ss = self._get_acq_streams()

        sacqt = acqmng.estimateTime(ss)
        if self._streams_intertwined:
            # Moving the focus will have to be done for every stream
            dur = sacqt * nsteps + step_time * (nsteps - 1) * len(ss)
        else:
            dur = sacqt * nsteps + step_time * (nsteps - 1)

        logging.debug("Estimating acquisition of %d streams will take %g s", len(ss), dur)

        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(dur), force_write=True)
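
    # A worked example of the estimate above (hypothetical numbers): with 2 streams
    # taking sacqt = 2 s per Z level, 3 levels and step_time = 0.1 s, the
    # intertwined branch gives 2 * 3 + 0.1 * 2 * 2 = 6.4 s, displayed as 7 s
    # after rounding up.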

    def _get_live_streams(self, tab_data):
        """
        Return all the live streams present in the given tab
        """
        ss = list(tab_data.streams.value)

        # On the SPARC, there is a Spot stream, which we don't need for live
        if hasattr(tab_data, "spotStream"):
            try:
                ss.remove(tab_data.spotStream)
            except ValueError:
                pass  # spotStream was not there anyway

        for s in ss:
            if isinstance(s, StaticStream):
                ss.remove(s)
        return ss

    def _get_acq_streams(self):
        """
        Return the streams that should be used for acquisition
        return:
           acq_st (list of streams): the streams to be acquired at every repetition
        """
        if not self._dlg:
            return []

        live_st = (self._dlg.view.getStreams() +
                   self._dlg.hidden_view.getStreams())
        logging.debug("View has %d streams", len(live_st))

        # On the SPARC, the acquisition streams are not the same as the live
        # streams. On the SECOM/DELPHI, they are the same (for now)
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        if hasattr(tab_data, "acquisitionStreams"):
            acq_st = tab_data.acquisitionStreams
            # Discard the acquisition streams which are not visible
            ss = []
            for acs in acq_st:
                if isinstance(acs, stream.MultipleDetectorStream):
                    if any(subs in live_st for subs in acs.streams):
                        ss.append(acs)
                        break
                elif acs in live_st:
                    ss.append(acs)
        else:
            # No special acquisition streams
            ss = live_st

        self._acq_streams = acqmng.foldStreams(ss, self._acq_streams)
        return self._acq_streams

    def _on_focus_pos(self, pos):
        # Do not listen to zstart when we change it, to make sure there is no loop
        self.zstart.unsubscribe(self._on_zstart)
        self.zstart.value = pos["z"]
        self.zstart.subscribe(self._on_zstart)

    def _on_zstart(self, zpos):
        self.focus.moveAbs({"z": zpos})
        # Don't wait for it to finish moving, eventually it will update the
        # focus position... and will set the zstart value

    def start(self):
        # Fail if the live tab is not selected
        tab = self.main_app.main_data.tab.value
        if tab.name not in ("secom_live", "sparc_acqui", "cryosecom-localization"):
            box = wx.MessageDialog(self.main_app.main_frame,
                       "ZStack acquisition must be done from the acquisition stream.",
                       "ZStack acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # On SPARC, fail if no ROI selected
        if hasattr(tab.tab_data_model, "roa") and tab.tab_data_model.roa.value == UNDEFINED_ROI:
            box = wx.MessageDialog(self.main_app.main_frame,
                       "You need to select a region of acquisition.",
                       "Z stack acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # Stop the stream(s) playing to not interfere with the acquisition
        tab.streambar_controller.pauseStreams()

        self.filename.value = self._get_new_filename()
        dlg = AcquisitionDialog(self, "Z Stack acquisition",
                                "The same streams will be acquired multiple times at different Z positions, starting from Z start and moving by the step size each time.\n")
        self._dlg = dlg
        dlg.addSettings(self, self.vaconf)
        ss = self._get_live_streams(tab.tab_data_model)
        for s in ss:
            if isinstance(s, (ARStream, SpectrumStream, MonochromatorSettingsStream)):
                # TODO: instead of hard-coding the list, a way to detect the type
                # of live image?
                logging.info("Not showing stream %s, for which the live image is not spatial", s)
                dlg.addStream(s, index=None)
            else:
                dlg.addStream(s)
        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Connect zstart with the actual focus position
        self.zstart.subscribe(self._on_zstart)
        self.focus.position.subscribe(self._on_focus_pos, init=True)

        # Update acq time when streams are added/removed
        dlg.view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        dlg.hidden_view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        # TODO: update the acquisition time whenever a setting changes

        # TODO: disable "acquire" button if no stream selected

        # TODO: also display the repetition and axis settings for the SPARC streams.

        ans = dlg.ShowModal()

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        self.focus.position.unsubscribe(self._on_focus_pos)
        self.zstart.unsubscribe(self._on_zstart)

        # Don't hold references
        self._acq_streams = None
        if dlg:  # If dlg hasn't been destroyed yet
            dlg.Destroy()

    def constructCube(self, images):
        # images is a list of 3 dim data arrays.
        ret = []
        for image in images:
            stack = numpy.dstack(image)
            stack = numpy.swapaxes(stack, 1, 2)
            ret.append(stack[0])

        # Add back metadata
        metadata3d = copy.copy(images[0].metadata)
        # Extend pixel size to 3D
        ps_x, ps_y = metadata3d[model.MD_PIXEL_SIZE]
        ps_z = self.zstep.value

        # Compute the cube centre
        c_x, c_y = metadata3d[model.MD_POS]
        c_z = self.zstart.value + (self.zstep.value * self.numberOfAcquisitions.value) / 2
        metadata3d[model.MD_POS] = (c_x, c_y, c_z)

        # For a negative pixel size, convert to a positive and flip the z axis
        if ps_z < 0:
            ret = numpy.flipud(ret)
            ps_z = -ps_z

        metadata3d[model.MD_PIXEL_SIZE] = (ps_x, ps_y, abs(ps_z))
        metadata3d[model.MD_DIMS] = "ZYX"

        ret = DataArray(ret, metadata3d)

        return ret

    """
    The acquire function API is generic.
    Special functionality is added in the functions
    """

    def initAcquisition(self):
        """
        Called before acquisition begins.
        Returns: (float) estimate of time per step
        """
        logging.info("Z stack acquisition started with %d levels", self.numberOfAcquisitions.value)

        # Move the focus to the start z position
        logging.debug("Preparing Z Stack acquisition. Moving focus to start position")
        self._old_pos = self.focus.position.value
        self.focus.moveAbs({'z': self.zstart.value}).result()
        self.focus.position.unsubscribe(self._on_focus_pos)  # to not update zstart when going through the steps
        self.zstart.unsubscribe(self._on_zstart)
        return self._estimate_step_duration()

    def preStepAcquisition(self, i):
        """
        Called before the ith step of the acquisition
        i (0 <= int): the step number
        """
        self.focus.moveAbs({'z': self.zstart.value + self.zstep.value * i}).result()

    def postStepAcquisition(self, i, images):
        """
        Called after the ith step of the acquisition
        i (0 <= int): the step number
        images []: A list of images as DataArrays
        """
        # Nothing to do after a focus step
        pass

    def completeAcquisition(self, completed):
        """
        Run actions that clean up after the acquisition occurs.
        completed (bool): True if completed without trouble
        """
        # Move back to the start position
        if completed:
            logging.info("Z Stack acquisition complete.")
        logging.debug("Returning focus to original position %s", self._old_pos)
        self.focus.moveAbs(self._old_pos).result()
        self.focus.position.subscribe(self._on_focus_pos)
        self.zstart.subscribe(self._on_zstart)
        
    def postProcessing(self, images):
        """
        Post-process the images after the acquisition is done.
        images (list of list of DataArray): the first dimension is the stream,
          the second dimension is the acquisition (Z step) number.
        Returns (list of DataArray): one Z-stack cube per stream
        """
        cubes = [self.constructCube(ims) for ims in images]
        return cubes

    def acquire(self, dlg):
        """
        Acquisition operation.
        """
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberOfAcquisitions.value
        ss = self._get_acq_streams()
        sacqt = acqmng.estimateTime(ss)
        logging.debug("Acquisition streams: %s", ss)

        # all_ss is a list of lists of streams to acquire. In theory, we could do
        # several sets of acquisitions, each with its own set of streams. However,
        # that's not how it's used: it's just a generic way to handle both cases,
        # either each acquisition has a single stream, or a single acquisition
        # covers all the streams (see the illustration after the if/else below).
        if self._streams_intertwined:
            # Streams are fastest changed: for each step, all streams are acquired
            all_ss = [ss]
        else:
            # Streams are slowest changed: for each stream, do all steps together
            all_ss = [[s] for s in ss]
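        # Illustration (hypothetical stream names, not from the original source):
        # with ss == [sem_st, cl_st] and 3 Z levels,
        #   intertwined:       all_ss == [[sem_st, cl_st]]  -> 1 pass, both streams at each level
        #   stream-per-stream: all_ss == [[sem_st], [cl_st]] -> 2 passes, one stream per pass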
        
        try:
            # list of list of list of DataArray: for each acquisition, for each stream, for each step, the data acquired
            all_images = [[] for _ in all_ss]
            completed = False

            step_time = self.initAcquisition()
            # TODO: if drift correction, use it over all the time
            f = model.ProgressiveFuture()
            f.task_canceller = lambda l: True  # To allow cancelling while it's running
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)

            total_nb = left = len(all_ss) * nb
            
            logging.debug("Will repeat the acquisition %d times", len(all_ss))
            for ss, images in zip(all_ss, all_images):
                for i in range(nb):
                    dur = sacqt * left + step_time * (left - 1)
                    logging.debug("Acquisition %d of %d", total_nb - left, total_nb)

                    startt = time.time()
                    f.set_progress(end=startt + dur)

                    # Prepare the axis for this step
                    self.preStepAcquisition(i)
                    das, e = acqmng.acquire(ss, self.main_app.main_data.settings_obs).result()
                    if e:
                        logging.warning("Will continue, although acquisition %d partially failed: %s", e)
                    if len(das) != len(ss):
                        logging.warning("Expected %d DataArrays, but got %d", len(ss), len(das))

                    if not images:
                        images[:] = [[] for _ in das]

                    for im, da in zip(images, das):
                        im.append(da)

                    if f.cancelled():
                        raise CancelledError()

                    # Clean-up or adjust the images
                    self.postStepAcquisition(i, images)
                    left -= 1

            # Collate back all the data as "for each stream, all the images acquired"
            images = []
            for ii in all_images:
                images.extend(ii)
            # Construct a cube from each stream's image.
            images = self.postProcessing(images)

            # Export image
            exporter = dataio.find_fittest_converter(self.filename.value)
            exporter.export(self.filename.value, images)

            f.set_result(None)  # Indicate it's over
            completed = True
            dlg.Close()
            
        except CancelledError:
            logging.debug("Acquisition cancelled.")
            dlg.resumeSettings()

        except Exception as e:
            logging.exception(e)

        finally:
            # Do completion actions
            self.completeAcquisition(completed)
Example #11
0
class MergeChannelsPlugin(Plugin):
    name = "Add RGB channels"
    __version__ = "1.0"
    __author__ = u"Victoria Mavrikopoulou"
    __license__ = "GPLv2"

    # The values are displayed with the following order
    vaconf = OrderedDict((
        ("filenameR", {
            "label":
            "Red channel",
            "control_type":
            odemis.gui.CONTROL_OPEN_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_RDONLY),
                                 include_all=True)[0]
        }),
        ("redShiftX", {
            "label": "   Red shift X"
        }),
        ("redShiftY", {
            "label": "   Red shift Y"
        }),
        ("filenameG", {
            "label":
            "Green channel",
            "control_type":
            odemis.gui.CONTROL_OPEN_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_RDONLY),
                                 include_all=True)[0]
        }),
        ("greenShiftX", {
            "label": "   Green shift X"
        }),
        ("greenShiftY", {
            "label": "   Green shift Y"
        }),
        ("filenameB", {
            "label":
            "Blue channel",
            "control_type":
            odemis.gui.CONTROL_OPEN_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_RDONLY),
                                 include_all=True)[0]
        }),
        ("blueShiftX", {
            "label": "   Blue shift X"
        }),
        ("blueShiftY", {
            "label": "   Blue shift Y"
        }),
        ("cropBottom", {
            "label": "Crop bottom"
        }),
    ))

    def __init__(self, microscope, main_app):
        super(MergeChannelsPlugin, self).__init__(microscope, main_app)

        self.filenameR = model.StringVA(" ")
        self.filenameG = model.StringVA(" ")
        self.filenameB = model.StringVA(" ")
        self.redShiftX = model.FloatContinuous(0, range=(-500, 500), unit="px")
        self.redShiftY = model.FloatContinuous(0, range=(-500, 500), unit="px")
        self.greenShiftX = model.FloatContinuous(0,
                                                 range=(-500, 500),
                                                 unit="px")
        self.greenShiftY = model.FloatContinuous(0,
                                                 range=(-500, 500),
                                                 unit="px")
        self.blueShiftX = model.FloatContinuous(0,
                                                range=(-500, 500),
                                                unit="px")
        self.blueShiftY = model.FloatContinuous(0,
                                                range=(-500, 500),
                                                unit="px")
        self.cropBottom = model.IntContinuous(0, range=(0, 200), unit="px")

        analysis_tab = self.main_app.main_data.getTabByName('analysis')
        analysis_tab.stream_bar_controller.add_action("Add RGB channels...",
                                                      self.start)

        self.filenameR.subscribe(self._filenameR)
        self.filenameG.subscribe(self._filenameG)
        self.filenameB.subscribe(self._filenameB)
        self.cropBottom.subscribe(self._cropBottom)

        self._subscribers = []
        self._dlg = None
        self._stream_red = None
        self._stream_green = None
        self._stream_blue = None
        self._raw_orig = {
        }  # dictionary (Stream -> DataArray) to handle the (un)cropping

    def start(self):
        dlg = AcquisitionDialog(
            self,
            "Merging channels to RGB image",
            text="Insert 3 R, G, B files so that they are assigned the tints \n"
            "and are merged to an RGB image.")
        # remove the play overlay from the viewport
        dlg.viewport_l.canvas.remove_view_overlay(
            dlg.viewport_l.canvas.play_overlay)

        self._dlg = dlg
        dlg.addStream(None)
        dlg.Size = (1000, 600)

        dlg.addSettings(self, self.vaconf)
        dlg.addButton("Cancel", None)
        dlg.addButton("Add", self._updateViewer, face_colour='blue')

        dlg.pnl_gauge.Hide()
        dlg.ShowModal()  # Blocks until the window is closed

        # Destroy the dialog and reset the VAs and subscribers
        dlg.Destroy()
        self.filenameR.value = " "
        self.filenameG.value = " "
        self.filenameB.value = " "
        self.redShiftX.value = 0
        self.redShiftY.value = 0
        self.greenShiftX.value = 0
        self.greenShiftY.value = 0
        self.blueShiftX.value = 0
        self.blueShiftY.value = 0
        self.cropBottom.value = 0
        self._subscribers = []
        self._dlg = None
        self._raw_orig = {}

    def _filenameR(self, filenameR):
        """Open the filename that corresponds to RED channel. If an image is already inserted, remove the old stream
        and add the new stream in the Acquisition Dialog."""
        if self._stream_red is not None:
            self._removeStream(self._stream_red)
        self._stream_red = self._openImage(filenameR, TINT_RED, self.redShiftX,
                                           self.redShiftY)
        self._storeDir(filenameR)

    def _filenameG(self, filenameG):
        """Open the filename that corresponds to GREEN channel. If an image is already inserted, remove the old stream
        and add the new stream in the Acquisition Dialog."""
        if self._stream_green is not None:
            self._removeStream(self._stream_green)
        self._stream_green = self._openImage(filenameG, TINT_GREEN,
                                             self.greenShiftX,
                                             self.greenShiftY)
        self._storeDir(filenameG)

    def _filenameB(self, filenameB):
        """Open the filename that corresponds to BLUE channel. If an image is already inserted, remove the old stream
        and add the new stream in the Acquisition Dialog."""
        if self._stream_blue is not None:
            self._removeStream(self._stream_blue)
        self._stream_blue = self._openImage(filenameB, TINT_BLUE,
                                            self.blueShiftX, self.blueShiftY)
        self._storeDir(filenameB)

    def _storeDir(self, fn):
        """Store the directory of the given filename so as the next filename is in the same place"""
        path, bn = os.path.split(fn)
        files = [self.filenameR, self.filenameG, self.filenameB]
        for se in self._dlg.setting_controller.entries:
            if se.vigilattr in files:
                se.value_ctrl.default_dir = path

    def _openImage(self, filename, tint, shiftX, shiftY):
        """ Open the given filename and assign the tint of the corresponding channel. Add the stream to the dialog and
        apply the crop functionality. Two sliders are displayed for every image to provide the option of shifting the
        streams in x and y dimension. If there is no filename given return None.
        Args:
            filename(str) : the given filename with the R, G or B stream
            tint(tuple): the color tint to be assigned
            shiftX(ContinuousVA): shift x value in meters
            shiftY(ContinuousVA): shift y value in meters
        Returns (Stream or None): the displayed stream
        """
        if filename == " ":
            return None

        try:
            data = udataio.open_acquisition(filename)[0]
            pxs = data.metadata.get(model.MD_PIXEL_SIZE, (1e-06, 1e-06))
            if pxs[0] > 1e-04 or pxs[1] > 1e-04:
                data.metadata[model.MD_PIXEL_SIZE] = (1e-06, 1e-06)
                logging.warning(
                    "The given pixel size %s is too big, it got replaced to the default value %s",
                    pxs, (1e-06, 1e-06))
            data = self._ensureRGB(data, tint)
        except Exception as ex:
            logging.exception("Failed to open %s", filename)
            self._showErrorMessage("Failed to open image",
                                   "Failed to open image:\n%s" % (ex, ))
            return None

        basename, ext = os.path.splitext(os.path.split(filename)[1])
        stream_ch = stream.StaticFluoStream(basename, data)
        self._raw_orig[stream_ch] = data
        self._dlg.addStream(stream_ch)
        self._setupStreambar()

        self._cropBottom()
        self._connectShift(stream_ch, 0, shiftX)
        self._connectShift(stream_ch, 1, shiftY)

        return stream_ch

    @call_in_wx_main
    def _showErrorMessage(self, title, msg):
        """
        Shows an error message in a message box
        title (str)
        msg (str)
        """
        box = wx.MessageDialog(self._dlg, msg, title, wx.OK | wx.ICON_STOP)
        box.ShowModal()
        box.Destroy()

    def _ensureRGB(self, data, tint):
        """
        Ensure that the image is grayscale. If the image is a grayscale RGB image, convert it
        to an 8-bit grayscale image of 2 dimensions and assign the corresponding tint to it.
        Update the metadata of the image.
        data (DataArray or DataArrayShadow): The input image
        return (DataArray): The resulting image with the assigned tint
        raises: ValueError if the image is an RGB image with differing color channels
        """
        if len(data.shape) > 3:
            raise ValueError("Image format not supported")
        if isinstance(data, model.DataArrayShadow):
            data = data.getData()
        if len(data.shape) == 3:
            data = img.ensureYXC(data)
            if (numpy.all(data[:, :, 0] == data[:, :, 1])
                    and numpy.all(data[:, :, 0] == data[:, :, 2])):
                data = data[:, :, 0]
                data.metadata[model.MD_DIMS] = "YX"
            else:
                raise ValueError("Coloured RGB image not supported")

        if model.MD_POS not in data.metadata:
            data.metadata[model.MD_POS] = (0, 0)
        if model.MD_PIXEL_SIZE not in data.metadata:
            data.metadata[model.MD_PIXEL_SIZE] = (1e-9, 1e-9)
        data.metadata[model.MD_USER_TINT] = tint

        return data
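
    # A minimal sketch of the conversion (array values assumed, not from the
    # original source): a grayscale RGB image with identical channels collapses
    # to a 2D tinted image, e.g.:
    #   gray_rgb = model.DataArray(numpy.zeros((256, 256, 3), dtype=numpy.uint8))
    #   da = self._ensureRGB(gray_rgb, TINT_RED)
    #   # da.shape == (256, 256), da.metadata[model.MD_DIMS] == "YX",
    #   # da.metadata[model.MD_USER_TINT] == TINT_RED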

    def _connectShift(self, stream, index, vashift):
        """Create listeners with information of the stream and the dimension.
        Hold a reference to the listeners to prevent automatic subscription"""
        va_on_shift = functools.partial(self._onShift, stream, index)
        self._subscribers.append(va_on_shift)
        vashift.subscribe(va_on_shift)

    def _removeStream(self, st):
        """Remove the given stream since another one is loaded from the user for display"""
        sconts = self._dlg.streambar_controller.stream_controllers
        for sc in sconts:
            if sc.stream is st:
                sc.stream_panel.on_remove_btn(st)
                del self._raw_orig[st]

    @call_in_wx_main
    def _setupStreambar(self):
        """Force stream panel to static mode. Needed for preventing user to play or
        remove streams from the stream panel"""
        sconts = self._dlg.streambar_controller.stream_controllers
        for sctrl in sconts:
            sctrl.stream_panel.to_static_mode()

    def _onShift(self, stream, i, value):
        """
        Update the stream after shifting it by the given value.
        Args:
            stream(StaticFluoStream): stream to be shifted
            i(int): the dimension along which the stream is shifted (0 = x, 1 = y)
            value(float): shift value in pixels
        """
        logging.debug("New shift = %f on stream %s", value, stream.name.value)
        poscor = stream.raw[0].metadata.get(model.MD_POS_COR, (0, 0))
        px_size = stream.raw[0].metadata[model.MD_PIXEL_SIZE]
        if i == 0:
            poscor = (-value * px_size[0], poscor[1])
        else:
            poscor = (poscor[0], -value * px_size[1])
        stream.raw[0].metadata[model.MD_POS_COR] = poscor
        self._forceUpdate(stream)

    def _cropBottom(self, _=None):
        """Crop the data bar at the bottom of the image"""
        for st, r in self._raw_orig.items():
            prev_md = st.raw[0].metadata
            st.raw[0] = r[:max(1, r.shape[0] - self.cropBottom.value), :]
            st.raw[0].metadata = prev_md
            self._forceUpdate(st)

    def _forceUpdate(self, st):
        """Force updating the projection of the given stream"""
        views = [self._dlg.view]
        for v in views:
            for sp in v.stream_tree.getProjections():  # stream or projection
                if isinstance(sp, DataProjection):
                    s = sp.stream
                else:
                    s = sp
                if s is st:
                    sp._shouldUpdateImage()

    def _updateViewer(self, dlg):
        """Update the view in the Analysis Tab with the merged image.
        Called when the user clicks on Done to close the dialog"""
        views = [self._dlg.view]
        das = []
        for v in views:
            for st in v.stream_tree.getProjections():  # stream or projection
                if isinstance(st, DataProjection):
                    s = st.stream
                else:
                    s = st
                das.append(s.raw[0])

        analysis_tab = self.main_app.main_data.tab.value
        analysis_tab.display_new_data(self.filenameR.value, das, extend=True)

        dlg.Close()
Example #12
0
class AveragePlugin(Plugin):
    name = "Frame Average"
    __version__ = "1.1"
    __author__ = u"Éric Piel"
    __license__ = "Public domain"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("dwellTime", {
            "tooltip": "Time spent on each pixel for one frame",
            "scale": "log",
            "type": "float",
            "accuracy": 2,
        }),
        (
            "accumulations",
            {
                "tooltip": "Number of frames acquired and averaged",
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        (
            "scale",
            {
                "control_type": odemis.gui.CONTROL_RADIO,
                # Can't directly use binning_1d_from_2d because it needs a component
            }),
        (
            "resolution",
            {
                "control_type": odemis.gui.CONTROL_READONLY,
                "tooltip": "Number of pixels scanned",
                "accuracy": None,  # never simplify the numbers
            }),
        ("filename", {
            "control_type":
            odemis.gui.CONTROL_SAVE_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        super(AveragePlugin, self).__init__(microscope, main_app)
        # Can only be used with a microscope
        if not microscope:
            return

        # Check which stream the microscope supports
        main_data = self.main_app.main_data
        if not main_data.ebeam:
            return

        self.addMenu("Acquisition/Averaged frame...", self.start)

        dt = main_data.ebeam.dwellTime
        dtrg = (dt.range[0], min(dt.range[1], 1))
        self.dwellTime = model.FloatContinuous(dt.value,
                                               range=dtrg,
                                               unit=dt.unit)
        self.scale = main_data.ebeam.scale
        # Trick to pass the component (ebeam) to binning_1d_from_2d()
        self.vaconf["scale"]["choices"] = (
            lambda cp, va, cf: odemis.gui.conf.util.binning_1d_from_2d(
                self.main_app.main_data.ebeam, va, cf))
        self.resolution = main_data.ebeam.resolution  # Just for info
        self.accumulations = model.IntContinuous(10, (1, 10000))
        self.filename = model.StringVA("a.h5")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)

        self.dwellTime.subscribe(self._update_exp_dur)
        self.accumulations.subscribe(self._update_exp_dur)
        self.scale.subscribe(self._update_exp_dur)

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension))

    def start(self):
        """
        Called when the menu entry is selected
        """
        main_data = self.main_app.main_data

        # Stop the streams
        tab_data = main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            s.should_update.value = False

        self.filename.value = self._get_new_filename()
        self.dwellTime.value = main_data.ebeam.dwellTime.value
        self._update_exp_dur()

        if main_data.cld:
            # If the cl-detector is present => configure the optical path (just to speed-up)
            main_data.opm.setPath("cli")

        dlg = AcquisitionDialog(
            self, "Averaged acquisition",
            "Acquires the SEM and CL intensity streams multiple times, \n"
            "as defined by the 'accumulations' setting, \n"
            "and store the average value.")
        dlg.addSettings(self, self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')
        ans = dlg.ShowModal()

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        dlg.Destroy()

    def _update_exp_dur(self, _=None):
        """
        Called when a VA that affects the expected duration is changed
        """
        res = self.main_app.main_data.ebeam.resolution.value
        # dt + 1µs for the sum and +5% for margin
        frt = numpy.prod(res) * (self.dwellTime.value + 1e-6) * 1.05
        tott = frt * self.accumulations.value + 0.1

        # Use _set_value as it's read only
        self.expectedDuration._set_value(tott, force_write=True)
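
    # A worked example of the estimate above (hypothetical numbers): with a
    # 512 x 512 resolution and a 1 µs dwell time, one frame is estimated at
    # 262144 * 2e-6 * 1.05 ≈ 0.55 s, so 10 accumulations give about 5.6 s.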

    def acquire(self, dlg):
        main_data = self.main_app.main_data
        nb = self.accumulations.value
        res = self.main_app.main_data.ebeam.resolution.value
        frt = numpy.prod(res) * self.dwellTime.value * 1.05  # +5% for margin

        # All the detectors to use
        dets = [d for d in (main_data.sed, main_data.bsd, main_data.cld) if d]
        if not dets:
            raise ValueError("No EM detector available")
        logging.info("Will acquire frame average on %d detectors", len(dets))

        self._das = [None] * len(dets)  # Data just received
        sumdas = [None] * len(dets)  # to store accumulated frame (in float)
        md = [None] * len(dets)  # to store the metadata
        self._prepare_acq(dets)

        end = time.time() + self.expectedDuration.value
        if main_data.cld:
            # If the cl-detector is present => configure the optical path
            opmf = main_data.opm.setPath("cli")
            end += 10
        else:
            opmf = None

        f = model.ProgressiveFuture(end=end)
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        if opmf:
            opmf.result()

        try:
            for i in range(nb):
                # Update the progress bar
                left = nb - i
                dur = frt * left + 0.1
                f.set_progress(end=time.time() + dur)

                # Start acquisition
                dets[0].softwareTrigger.notify()

                # Wait for the acquisition
                for n, ev in enumerate(self._events):
                    if not ev.wait(dur * 3 + 5):
                        raise IOError("Timeout while waiting for frame")
                    ev.clear()

                    # Add the latest frame to the sum
                    # TODO: do this while waiting for the next frame (to save time)
                    da = self._das[n]
                    if sumdas[n] is None:
                        # Convert to float, to handle very large numbers
                        sumdas[n] = da.astype(numpy.float64)
                        md[n] = da.metadata
                    else:
                        sumdas[n] += da

                logging.info("Acquired frame %d", i + 1)

                if f.cancelled():
                    logging.debug("Acquisition cancelled")
                    return
        finally:
            self._end_acq(dets)

        # Compute the average data
        fdas = []
        for sd, da_md, ld in zip(sumdas, md, self._das):
            fdas.append(
                self._average_data(self.accumulations.value, sd, da_md, ld.dtype))

        logging.info("Exporting data to %s", self.filename.value)
        exporter = dataio.find_fittest_converter(self.filename.value)
        exporter.export(self.filename.value, fdas)
        f.set_result(None)  # Indicate it's over

        # Display the file
        self.showAcquisition(self.filename.value)
        dlg.Close()

    def _prepare_acq(self, dets):
        # We could synchronize all the detectors, but doing just one will force
        # the others to wait, as they are all handled by the same e-beam driver
        d0 = dets[0]
        d0.data.synchronizedOn(d0.softwareTrigger)

        # For each detector, create a listener to receive the data, and an event
        # to let the main loop know this data has been received
        self._events = []
        self._listeners = []
        for i, d in enumerate(dets):
            ev = threading.Event()
            self._events.append(ev)

            # Ad-hoc function to receive the data
            def on_data(df, data, i=i, ev=ev):
                self._das[i] = data
                ev.set()

            self._listeners.append(on_data)
            d.data.subscribe(on_data)

    def _end_acq(self, dets):
        dets[0].data.synchronizedOn(None)
        for d, l in zip(dets, self._listeners):
            d.data.unsubscribe(l)

    def _average_data(self, nb, sumda, md, dtype):
        """
        nb (int): the number of acquisitions
        sumda (DataArray): the accumulated acquisition from a detector
        md (dict): the metadata
        dtype (numpy.dtype): the data type to be converted to
        return (DataArray): the averaged frame (with the correct metadata)
        """
        a = sumda / nb
        a = model.DataArray(a.astype(dtype), md)

        # The metadata is the one from the first DataArray, which is good for
        # _almost_ everything
        if model.MD_DWELL_TIME in a.metadata:
            a.metadata[model.MD_DWELL_TIME] *= nb
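            # e.g. (hypothetical numbers) 10 accumulated frames acquired with a
            # 1 µs dwell time are reported as an effective dwell time of 10 µs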

        return a
Example #13
0
def ShowAcquisitionFileDialog(parent, filename):
    """
    parent (wxFrame): parent window
    filename (string): full filename to propose by default
    Note: updates the acquisition configuration if the user picked a new file
    return (string or None): the new filename (or None if the user cancelled)
    """
    conf = get_acqui_conf()

    # Find the available formats (and corresponding extensions)
    formats_to_ext = dataio.get_available_formats()

    # current filename
    path, base = os.path.split(filename)

    # Note: When setting 'defaultFile' while creating the file dialog, the
    #   extension of the first filter will automatically be added to the name.
    #   Since it cannot be changed by selecting a different file type, this is
    #   a big no-no. Also, extensions with multiple periods ('.') are not
    #   correctly handled. The solution is to use the SetFilename method instead.
    wildcards, formats = formats_to_wildcards(formats_to_ext)
    dialog = wx.FileDialog(parent,
                           message="Choose a filename and destination",
                           defaultDir=path,
                           defaultFile="",
                           style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
                           wildcard=wildcards)

    # Select the last format used
    prev_fmt = conf.last_format
    try:
        idx = formats.index(conf.last_format)
    except ValueError:
        idx = 0
    dialog.SetFilterIndex(idx)

    # Strip the extension, so that if the user changes the file format,
    # it will not have 2 extensions in a row.
    if base.endswith(conf.last_extension):
        base = base[:-len(conf.last_extension)]
    dialog.SetFilename(base)

    # Show the dialog and check whether it was accepted or cancelled
    if dialog.ShowModal() != wx.ID_OK:
        return None

    # New location and name have been selected...
    # Store the path
    path = dialog.GetDirectory()
    conf.last_path = path

    # Store the format
    fmt = formats[dialog.GetFilterIndex()]
    conf.last_format = fmt

    # Check the filename has a good extension, or add the default one
    fn = dialog.GetFilename()
    ext = None
    for extension in formats_to_ext[fmt]:
        if fn.endswith(extension) and len(extension) > len(ext or ""):
            ext = extension

    if ext is None:
        if fmt == prev_fmt and conf.last_extension in formats_to_ext[fmt]:
            # if the format is the same (and extension is compatible): keep
            # the extension. This avoids changing the extension if it's not
            # the default one.
            ext = conf.last_extension
        else:
            ext = formats_to_ext[fmt][0] # default extension
        fn += ext

    conf.last_extension = ext

    return os.path.join(path, fn)
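

# A hypothetical usage sketch (the helper name and the call site are assumed,
# not from the original source): ask the user where to store the data, then
# export it with the fittest converter, as done in the plugins above.
def _save_with_dialog(parent, default_fn, das):
    fn = ShowAcquisitionFileDialog(parent, default_fn)
    if fn is None:
        return  # the user cancelled the dialog
    exporter = dataio.find_fittest_converter(fn)
    exporter.export(fn, das)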
Example #14
0
    def ShowExportFileDialog(self, filename, default_exporter):
        """
        filename (string): full filename to propose by default
        default_exporter (module): default exporter to be used
        return (string or None): the new filename (or None if the user cancelled)
                (string): the format name
                (string): the export type: spatial, AR, spectrum or spectrum-line
        """
        # Find the available formats (and corresponding extensions) according
        # to the export type
        export_type = self.get_export_type(self._data_model.focussedView.value)
        formats_to_ext = self.get_export_formats(export_type)

        # current filename
        path, base = os.path.split(filename)
        uformats_to_ext = OrderedDict(formats_to_ext.values())
        wildcards, uformats = formats_to_wildcards(uformats_to_ext, suffix="")
        dialog = wx.FileDialog(self._main_frame,
                               message="Choose a filename and destination",
                               defaultDir=path,
                               defaultFile="",
                               style=wx.FD_SAVE,  # | wx.FD_OVERWRITE_PROMPT,
                               wildcard=wildcards)

        # Select the default format
        default_fmt = default_exporter.FORMAT
        try:
            uf = formats_to_ext[default_fmt][0]
            idx = uformats.index(uf)
        except ValueError:
            idx = 0
        dialog.SetFilterIndex(idx)

        # Strip the extension, so that if the user changes the file format,
        # it will not have 2 extensions in a row.
        if base.endswith(default_exporter.EXTENSIONS[0]):
            base = base[:-len(default_exporter.EXTENSIONS[0])]
        dialog.SetFilename(base)

        # Show the dialog and check whether it was accepted or cancelled
        if dialog.ShowModal() != wx.ID_OK:
            return None, default_fmt, export_type

        # New location and name have been selected...
        # Store the path
        path = dialog.GetDirectory()

        # Store the format
        ufmt = uformats[dialog.GetFilterIndex()]
        for f, (uf, _) in formats_to_ext.items():
            if uf == ufmt:
                fmt = f
                break
        else:
            logging.debug("Failed to link %s to a known format", ufmt)
            fmt = default_fmt

        # Check the filename has a good extension, or add the default one
        fn = dialog.GetFilename()
        ext = None
        for extension in formats_to_ext[fmt][1]:
            if fn.endswith(extension) and len(extension) > len(ext or ""):
                ext = extension

        if ext is None:
            if fmt == default_fmt and default_exporter.EXTENSIONS[0] in formats_to_ext[fmt][1]:
                # if the format is the same (and extension is compatible): keep
                # the extension. This avoids changing the extension if it's not
                # the default one.
                ext = default_exporter.EXTENSIONS[0]
            else:
                ext = formats_to_ext[fmt][1][0]  # default extension
            fn += ext

        fullfn = os.path.join(path, fn)
        # As we strip the extension from the filename, the normal dialog cannot
        # detect we'd overwrite an existing file => show our own warning
        if os.path.exists(fullfn):
            dlg = wx.MessageDialog(self._main_frame,
                                   "A file named \"%s\" already exists.\n"
                                   "Do you want to replace it?" % (fn,),
                                   "File already exists",
                                   wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
            ret = dlg.ShowModal()
            dlg.Destroy()
            if ret == wx.ID_NO:
                return None, default_fmt, export_type

        return fullfn, fmt, export_type
Example #16
0
class TileAcqPlugin(Plugin):
    name = "Tile acquisition"
    __version__ = "1.7"
    __author__ = u"Éric Piel, Philip Winkler"
    __license__ = "GPLv2"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        (
            "nx",
            {
                "label": "Tiles X",
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        (
            "ny",
            {
                "label": "Tiles Y",
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        ("overlap", {
            "tooltip": "Approximate amount of overlapping area between tiles",
        }),
        ("filename", {
            "tooltip":
            "Pattern of each filename",
            "control_type":
            odemis.gui.CONTROL_SAVE_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("stitch", {
            "tooltip":
            "Use all the tiles to create a large-scale image at the end of the acquisition",
        }),
        ("expectedDuration", {}),
        ("totalArea", {
            "tooltip": "Approximate area covered by all the streams"
        }),
        ("fineAlign", {
            "label": "Fine alignment",
        })))

    def __init__(self, microscope, main_app):
        super(TileAcqPlugin, self).__init__(microscope, main_app)

        self._dlg = None
        self._tab = None  # the acquisition tab
        self.ft = model.InstantaneousFuture()  # acquisition future
        self.microscope = microscope

        # Can only be used with a microscope
        if not microscope:
            return
        else:
            # Check if microscope supports tiling (= has a sample stage)
            main_data = self.main_app.main_data
            if main_data.stage:
                self.addMenu("Acquisition/Tile...\tCtrl+G", self.show_dlg)
            else:
                logging.info(
                    "Tile acquisition not available as no stage present")
                return

        self._ovrl_stream = None  # stream for fine alignment

        self.nx = model.IntContinuous(5, (1, 1000), setter=self._set_nx)
        self.ny = model.IntContinuous(5, (1, 1000), setter=self._set_ny)
        self.overlap = model.FloatContinuous(20, (1, 80), unit="%")
        self.filename = model.StringVA("a.ome.tiff")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)
        self.totalArea = model.TupleVA((1, 1), unit="m", readonly=True)
        self.stitch = model.BooleanVA(True)
        self.fineAlign = model.BooleanVA(False)
        # TODO: manage focus (e.g. autofocus, or ask the user to manually focus on the
        # corners of the ROI and linearly interpolate)

        self.nx.subscribe(self._update_exp_dur)
        self.ny.subscribe(self._update_exp_dur)
        self.fineAlign.subscribe(self._update_exp_dur)
        self.nx.subscribe(self._update_total_area)
        self.ny.subscribe(self._update_total_area)
        self.overlap.subscribe(self._update_total_area)

        # Warn if memory will be exhausted
        self.nx.subscribe(self._memory_check)
        self.ny.subscribe(self._memory_check)
        self.stitch.subscribe(self._memory_check)

    def _can_fine_align(self, streams):
        """
        Return True if, with the given streams, it would make sense to fine align
        streams (iterable of Stream)
        return (bool): True if at least one SEM stream and one optical stream are present
        """
        # check for a SEM stream
        for s in streams:
            if isinstance(s, EMStream):
                break
        else:
            return False

        # check for an optical stream
        # TODO: allow it also for ScannedFluoStream once fine alignment is supported
        # on confocal SECOM.
        for s in streams:
            if isinstance(s, stream.OpticalStream) and not isinstance(
                    s, stream.ScannedFluoStream):
                break
        else:
            return False

        return True

    def _get_visible_streams(self):
        """
        Returns the streams set as visible in the acquisition dialog
        """
        if not self._dlg:
            return []
        ss = self._dlg.view.getStreams() + self._dlg.hidden_view.getStreams()
        logging.debug("View has %d streams", len(ss))
        return ss

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension))

    def _on_streams_change(self, _=None):
        ss = self._get_visible_streams()
        # Subscribe to all relevant setting changes
        for s in ss:
            for va in self._get_settings_vas(s):
                va.subscribe(self._update_exp_dur)
                va.subscribe(self._memory_check)

        # Disable fine alignment if it's not possible
        if self._dlg:
            for entry in self._dlg.setting_controller.entries:
                if hasattr(entry, "vigilattr"):
                    if entry.vigilattr == self.fineAlign:
                        if self._can_fine_align(ss):
                            entry.lbl_ctrl.Enable(True)
                            entry.value_ctrl.Enable(True)
                            self._ovrl_stream = self._create_overlay_stream(ss)
                        else:
                            entry.lbl_ctrl.Enable(False)
                            entry.value_ctrl.Enable(False)
                        break

    def _unsubscribe_vas(self):
        ss = self._get_live_streams()

        # Unsubscribe from all relevant setting changes
        for s in ss:
            for va in self._get_settings_vas(s):
                va.unsubscribe(self._update_exp_dur)
                va.unsubscribe(self._memory_check)

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        tat = self.estimate_time()

        # Typically there are a few more pixels inserted at the beginning of
        # each line for the settle time of the beam. We don't take this into
        # account and so tend to slightly under-estimate.

        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(tat), force_write=True)

    def _update_total_area(self, _=None):
        """
        Called when VA that affects the total area is changed
        """
        # Find the stream with the smallest FoV
        try:
            fov = self._guess_smallest_fov()
        except ValueError as ex:
            logging.debug("Cannot compute total area: %s", ex)
            return

        # * number of tiles - overlap
        nx = self.nx.value
        ny = self.ny.value
        logging.debug("Updating total area based on FoV = %s m x (%d x %d)",
                      fov, nx, ny)
        ta = (fov[0] * (nx - (nx - 1) * self.overlap.value / 100),
              fov[1] * (ny - (ny - 1) * self.overlap.value / 100))
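        # Illustrative example (assumed numbers, not from the source): with a
        # 100 µm x 100 µm smallest FoV, nx = ny = 5 and 20 % overlap, each extra
        # tile only adds 80 % of a FoV, so the covered width is
        # 100e-6 * (5 - 4 * 20 / 100) = 100e-6 * 4.2 = 420 µm (same for the height).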

        # Use _set_value as it's read only
        self.totalArea._set_value(ta, force_write=True)

    def _set_nx(self, nx):
        """
        Check that the stage limit is not exceeded during acquisition of nx tiles.
        If it would be, nx is automatically clipped to the maximum reachable value.
        """
        stage = self.main_app.main_data.stage
        orig_pos = stage.position.value
        tile_size = self._guess_smallest_fov()
        overlap = 1 - self.overlap.value / 100
        tile_pos_x = orig_pos["x"] + self.nx.value * tile_size[0] * overlap

        # The acquisition region only extends to the right and to the bottom, never
        # to the left or above the current position, so it is not required to
        # check the distance to the top and left edges of the stage.
        if hasattr(stage.axes["x"], "range"):
            max_x = stage.axes["x"].range[1]
            if tile_pos_x > max_x:
                nx = max(
                    1, int((max_x - orig_pos["x"]) / (overlap * tile_size[0])))
                logging.info(
                    "Restricting number of tiles in x direction to %i due to stage limit.",
                    nx)
        return nx
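        # Illustrative example (assumed numbers): if the stage can still travel
        # 1 mm to the right, the smallest FoV is 100 µm wide and the overlap is
        # 20 %, the effective step per tile is 80 µm, so nx gets clipped to
        # int(1e-3 / 80e-6) = 12 tiles.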

    def _set_ny(self, ny):
        """
        Check that the stage limit is not exceeded during acquisition of ny tiles.
        If it would be, ny is automatically clipped to the maximum reachable value.
        """
        stage = self.main_app.main_data.stage
        orig_pos = stage.position.value
        tile_size = self._guess_smallest_fov()
        overlap = 1 - self.overlap.value / 100
        tile_pos_y = orig_pos["y"] - self.ny.value * tile_size[1] * overlap

        if hasattr(stage.axes["y"], "range"):
            min_y = stage.axes["y"].range[0]
            if tile_pos_y < min_y:
                ny = max(
                    1,
                    int(-(min_y - orig_pos["y"]) / (overlap * tile_size[1])))
                logging.info(
                    "Restricting number of tiles in y direction to %i due to stage limit.",
                    ny)

        return ny

    def _guess_smallest_fov(self):
        """
        Return (float, float): smallest width and smallest height of all the FoV
          Note: they are not necessarily from the same FoV.
        raise ValueError: If no stream selected
        """
        ss = self._get_live_streams()
        # Filter out static streams (do not mutate the list while iterating over it)
        ss = [s for s in ss if not isinstance(s, StaticStream)]
        fovs = [self._get_fov(s) for s in ss]
        if not fovs:
            raise ValueError("No stream, so no FoV, and thus no smallest one")

        return (min(f[0] for f in fovs), min(f[1] for f in fovs))

    def show_dlg(self):
        # TODO: if there is a chamber, only allow if there is vacuum

        # Fail if the live tab is not selected
        self._tab = self.main_app.main_data.tab.value
        if self._tab.name not in ("secom_live", "sparc_acqui"):
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "Tiled acquisition must be done from the acquisition tab.",
                "Tiled acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        self._tab.streambar_controller.pauseStreams()

        # If no ROI is selected, select entire area
        try:
            if self._tab.tab_data_model.semStream.roi.value == UNDEFINED_ROI:
                self._tab.tab_data_model.semStream.roi.value = (0, 0, 1, 1)
        except AttributeError:
            pass  # Not a SPARC

        # Disable drift correction (on SPARC)
        if hasattr(self._tab.tab_data_model, "driftCorrector"):
            self._tab.tab_data_model.driftCorrector.roi.value = UNDEFINED_ROI

        ss = self._get_live_streams()
        self.filename.value = self._get_new_filename()

        dlg = AcquisitionDialog(
            self, "Tiled acquisition",
            "Acquire a large area by acquiring the streams multiple "
            "times over a grid.")
        self._dlg = dlg
        # don't allow adding/removing streams
        self._dlg.streambar_controller.to_static_mode()

        dlg.addSettings(self, self.vaconf)
        for s in ss:
            if isinstance(
                    s,
                (ARStream, SpectrumStream, MonochromatorSettingsStream)):
                # TODO: instead of hard-coding the list, a way to detect the type
                # of live image?
                logging.info(
                    "Not showing stream %s, for which the live image is not spatial",
                    s)
                dlg.addStream(s, index=None)
            else:
                dlg.addStream(s, index=0)

        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Update acq time and area when streams are added/removed. Add stream settings
        # to subscribed vas.
        dlg.view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        dlg.view.stream_tree.flat.subscribe(self._update_total_area, init=True)
        dlg.view.stream_tree.flat.subscribe(self._on_streams_change, init=True)

        # Default fineAlign to True if it's possible
        # Use live streams to make the decision since visible streams might not be initialized yet
        # TODO: the visibility of the streams seems to be reset when the plugin is
        # started: a stream that is invisible in the main panel becomes visible.
        # This should be fixed.
        if self._can_fine_align(ss):
            self.fineAlign.value = True
            self._ovrl_stream = self._create_overlay_stream(ss)

        # This looks tautological, but it actually forces the setter to check that
        # the value is within range, and to automatically reduce it if necessary.
        self.nx.value = self.nx.value
        self.ny.value = self.ny.value
        self._memory_check()

        # TODO: disable "acquire" button if no stream selected.

        ans = dlg.ShowModal()
        if ans == 0 or ans == wx.ID_CANCEL:
            logging.info("Tiled acquisition cancelled")
            self.ft.cancel()
        elif ans == 1:
            logging.info("Tiled acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        # Don't hold references
        self._unsubscribe_vas()
        dlg.Destroy()
        self._dlg = None

    # Blacklist of VA names which are known to not affect the acquisition time
    VAS_NO_ACQUISITION_EFFECT = ("image", "autoBC", "intensityRange",
                                 "histogram", "is_active", "should_update",
                                 "status", "name", "tint")

    def _create_overlay_stream(self, streams):
        for s in streams:
            if isinstance(s, EMStream):
                em_det = s.detector
                em_emt = s.emitter
            elif isinstance(s, stream.OpticalStream) and not isinstance(
                    s, stream.ScannedFluoStream):
                opt_det = s.detector
        main_data = self.main_app.main_data
        st = stream.OverlayStream("Fine alignment",
                                  opt_det,
                                  em_emt,
                                  em_det,
                                  opm=main_data.opm)
        st.dwellTime.value = main_data.fineAlignDwellTime.value
        return st

    def _get_settings_vas(self, stream):
        """
        Find all the VAs of a stream which can potentially affect the acquisition time
        return (set of VAs)
        """

        nvas = model.getVAs(stream)  # name -> va
        vas = set()
        # remove some VAs known to not affect the acquisition time
        for n, va in nvas.items():
            if n not in self.VAS_NO_ACQUISITION_EFFECT:
                vas.add(va)
        return vas

    def _get_live_streams(self):
        """
        Return all the live streams usable for tiled acquisition in the current tab
        """
        tab_data = self._tab.tab_data_model
        ss = list(tab_data.streams.value)

        # On the SPARC, there is a Spot stream, which we don't need for live
        if hasattr(tab_data, "spotStream"):
            try:
                ss.remove(tab_data.spotStream)
            except ValueError:
                pass  # spotStream was not there anyway

        # Filter out static streams (do not mutate the list while iterating over it)
        ss = [s for s in ss if not isinstance(s, StaticStream)]
        return ss

    def _get_acq_streams(self):
        """
        Return the streams that should be used for acquisition
        all_ss (list of Streams): all acquisition streams possibly including overlay stream
        stitch_ss (list of Streams): acquisition streams to be used for stitching (no overlay stream)
        """
        # On the SPARC, the acquisition streams are not the same as the live
        # streams. On the SECOM/DELPHI, they are the same (for now)
        live_st = self._get_visible_streams()
        tab_data = self._tab.tab_data_model

        if hasattr(tab_data, "acquisitionStreams"):
            acq_st = tab_data.acquisitionStreams
            # Discard the acquisition streams which are not visible
            stitch_ss = []
            for acs in acq_st:
                if (acs in live_st
                        or (isinstance(acs, MultipleDetectorStream)
                            and any(subs in live_st for subs in acs.streams))):
                    stitch_ss.append(acs)
        else:
            # No special acquisition streams
            stitch_ss = live_st[:]

        # Add the overlay stream if requested
        all_ss = stitch_ss[:]
        if self.fineAlign.value and self._can_fine_align(live_st):
            all_ss = stitch_ss + [self._ovrl_stream]
        return all_ss, stitch_ss

    def _generate_scanning_indices(self, rep):
        """
        Generate the explicit X/Y position of each tile, in the scanning order
        rep (int, int): X, Y number of tiles
        return (generator of tuple(int, int)): x/y positions, starting from 0,0
        """
        # For now we do forward/backward on X (fast), and Y (slowly)
        direction = 1
        for iy in range(rep[1]):
            if direction == 1:
                for ix in range(rep[0]):
                    yield (ix, iy)
            else:
                for ix in range(rep[0] - 1, -1, -1):
                    yield (ix, iy)

            direction *= -1
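        # For example, for rep = (3, 2) this yields the serpentine order:
        #   (0, 0), (1, 0), (2, 0), (2, 1), (1, 1), (0, 1)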

    def _move_to_tile(self, idx, orig_pos, tile_size, prev_idx):
        # Scan right/down, with every second line traversed backward (serpentine):
        # similar to the writing/scanning convention, but moving just one tile
        # at a time.
        # A-->-->-->--v
        #             |
        # v--<--<--<---
        # |
        # --->-->-->--Z
        overlap = 1 - self.overlap.value / 100
        # don't move on the axis that is not supposed to have changed
        m = {}
        idx_change = numpy.subtract(idx, prev_idx)
        if idx_change[0]:
            m["x"] = orig_pos["x"] + idx[0] * tile_size[0] * overlap
        if idx_change[1]:
            m["y"] = orig_pos["y"] - idx[1] * tile_size[1] * overlap

        logging.debug("Moving to tile %s at %s m", idx, m)
        f = self.main_app.main_data.stage.moveAbs(m)
        try:
            speed = 10e-6  # m/s. Assume very low speed for timeout.
            t = math.hypot(tile_size[0] * overlap,
                           tile_size[1] * overlap) / speed + 1
            # add 1 to make sure it doesn't time out in case of a very small move
            f.result(t)
        except TimeoutError:
            logging.warning("Failed to move to tile %s", idx)
            self.ft.running_subf.cancel()
            # Continue acquiring anyway... maybe it has moved somewhere near
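        # Timeout example (assumed numbers): with 100 µm tiles and 20 % overlap
        # the step is 80 µm per axis, so the diagonal move is ~113 µm; at the
        # assumed 10 µm/s this gives a timeout of about 11.3 s + 1 s ≈ 12 s.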

    def _get_fov(self, sd):
        """
        sd (Stream or DataArray): If it's a stream, it must be a live stream,
          and the FoV will be estimated based on the settings. If it's a DataArray,
          the FoV is computed from its shape and pixel-size metadata.
        return (float, float): width, height in m
        """
        if isinstance(sd, model.DataArray):
            # The actual FoV, as the data recorded it
            return (sd.shape[0] * sd.metadata[model.MD_PIXEL_SIZE][0],
                    sd.shape[1] * sd.metadata[model.MD_PIXEL_SIZE][1])
        elif isinstance(sd, Stream):
            # Estimate the FoV, based on the emitter/detector settings
            if isinstance(sd, SEMStream):
                ebeam = sd.emitter
                return (ebeam.shape[0] * ebeam.pixelSize.value[0],
                        ebeam.shape[1] * ebeam.pixelSize.value[1])

            elif isinstance(sd, CameraStream):
                ccd = sd.detector
                # Look at what metadata the images will get
                md = ccd.getMetadata().copy()
                img.mergeMetadata(
                    md)  # apply correction info from fine alignment

                shape = ccd.shape[0:2]
                pxs = md[model.MD_PIXEL_SIZE]
                # compensate for binning
                binning = ccd.binning.value
                pxs = [p / b for p, b in zip(pxs, binning)]
                return shape[0] * pxs[0], shape[1] * pxs[1]

            elif isinstance(sd, RepetitionStream):
                # CL, Spectrum, AR
                ebeam = sd.emitter
                global_fov = (ebeam.shape[0] * ebeam.pixelSize.value[0],
                              ebeam.shape[1] * ebeam.pixelSize.value[1])
                l, t, r, b = sd.roi.value
                fov = abs(r - l) * global_fov[0], abs(b - t) * global_fov[1]
                return fov
            else:
                raise TypeError("Unsupported Stream %s" % (sd, ))
        else:
            raise TypeError("Unsupported object")

    def _cancel_acquisition(self, future):
        """
        Canceler of acquisition task.
        """
        logging.debug("Canceling acquisition...")

        with future._task_lock:
            if future._task_state == FINISHED:
                return False
            future._task_state = CANCELLED
            future.running_subf.cancel()
            logging.debug("Acquisition cancelled.")
        return True

    STITCH_SPEED = 1e-8  # s/px
    MOVE_SPEED = 1e3  # s/m

    def estimate_time(self, remaining=None):
        """
        Estimates duration for acquisition and stitching.
        """
        ss, stitch_ss = self._get_acq_streams()

        if remaining is None:
            remaining = self.nx.value * self.ny.value
        acqt = acqmng.estimateTime(ss)

        if self.stitch.value:
            # Estimate stitching time based on number of pixels in the overlapping part
            max_pxs = 0
            for s in stitch_ss:
                for sda in s.raw:
                    pxs = sda.shape[0] * sda.shape[1]
                    if pxs > max_pxs:
                        max_pxs = pxs

            stitcht = self.nx.value * self.ny.value * max_pxs * self.overlap.value * self.STITCH_SPEED
        else:
            stitcht = 0

        try:
            movet = max(
                self._guess_smallest_fov()) * self.MOVE_SPEED * (remaining - 1)
            # current tile is part of remaining, so no need to move there
        except ValueError:  # no current streams
            movet = 0.5

        return acqt * remaining + movet + stitcht
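        # Illustrative estimate (assumed numbers): 25 tiles at 2 s each, a 100 µm
        # smallest FoV and MOVE_SPEED = 1e3 s/m give
        # 25 * 2 + 100e-6 * 1e3 * 24 + stitching time ≈ 52.4 s + stitching time.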

    def sort_das(self, das, ss):
        """
        Sorts das based on priority for stitching, i.e. largest SEM da first, then
        other SEM das, and finally das from other streams.
        das: list of DataArrays
        ss: streams from which the das were extracted

        returns: list of DataArrays, reordered input
        """
        # Add the ACQ_TYPE metadata (in case it's not there)
        # In practice, we check the stream the DA came from, and based on the stream
        # type, fill the metadata
        # TODO: make sure acquisition type is added to data arrays before, so this
        # code can be deleted
        for da in das:
            if model.MD_ACQ_TYPE in da.metadata:
                continue
            for s in ss:
                for sda in s.raw:
                    if da is sda:  # Found it!
                        if isinstance(s, EMStream):
                            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_EM
                        elif isinstance(s, ARStream):
                            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_AR
                        elif isinstance(s, SpectrumStream):
                            da.metadata[
                                model.MD_ACQ_TYPE] = model.MD_AT_SPECTRUM
                        elif isinstance(s, FluoStream):
                            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_FLUO
                        elif isinstance(s, MultipleDetectorStream):
                            if model.MD_OUT_WL in da.metadata:
                                da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_CL
                            else:
                                da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_EM
                        else:
                            logging.warning("Unknown acq stream type for %s",
                                            s)
                        break
                if model.MD_ACQ_TYPE in da.metadata:
                    # if da is found, no need to search other streams
                    break
            else:
                logging.warning("Couldn't find the stream for DA of shape %s",
                                da.shape)

        # save tiles for stitching
        if self.stitch.value:
            # Remove the DAs we don't want to (cannot) stitch
            das = [da for da in das if da.metadata[model.MD_ACQ_TYPE] \
                   not in (model.MD_AT_AR, model.MD_AT_SPECTRUM)]

            def leader_quality(da):
                """
                return (int): the larger the value, the higher the stitching priority
                """
                # For now, we strongly prefer the EM images, because they usually
                # have the smallest FoV and the most contrast
                if da.metadata[model.MD_ACQ_TYPE] == model.MD_AT_EM:
                    return numpy.prod(da.shape)  # More pixels to find the overlap
                elif da.metadata[model.MD_ACQ_TYPE]:
                    # A lot less likely
                    return numpy.prod(da.shape) / 100

            das.sort(key=leader_quality, reverse=True)
            das = tuple(das)
        return das

    def _check_fov(self, das, sfov):
        """
        Checks the FoV based on the data arrays.
        das: list of DataArrays
        sfov: previous estimate for the FoV
        """
        afovs = [self._get_fov(d) for d in das]
        asfov = (min(f[1] for f in afovs), min(f[0] for f in afovs))
        if not all(util.almost_equal(e, a) for e, a in zip(sfov, asfov)):
            logging.warning("Unexpected min FoV = %s, instead of %s", asfov,
                            sfov)
            sfov = asfov
        return sfov

    def _estimateStreamPixels(self, s):
        """
        return (int): the number of pixels the stream will generate during an
          acquisition
        """
        px = 0
        if isinstance(s, MultipleDetectorStream):
            for st in s.streams:
                # For the EMStream of a SPARC MDStream, it's just one pixel per
                # repetition (except in case of fuzzing, but let's be optimistic)
                if isinstance(st, (EMStream, CLStream)):
                    px += 1
                else:
                    px += self._estimateStreamPixels(st)

            if hasattr(s, 'repetition'):
                px *= s.repetition.value[0] * s.repetition.value[1]

            return px
        elif isinstance(s, (ARStream, SpectrumStream)):
            # Temporarily reports 0 px, as we don't stitch these streams for now
            return 0

        if hasattr(s, 'emtResolution'):
            px = numpy.prod(s.emtResolution.value)
        elif hasattr(s, 'detResolution'):
            px = numpy.prod(s.detResolution.value)
        elif model.hasVA(s.detector, "resolution"):
            px = numpy.prod(s.detector.resolution.value)
        elif model.hasVA(s.emitter, "resolution"):
            px = numpy.prod(s.emitter.resolution.value)
        else:
            # This shouldn't happen, but let's be optimistic by assuming it'll
            # only acquire one pixel.
            logging.info("Resolution of stream %s cannot be determined.", s)
            px = 1

        return px
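        # Illustrative example (assumed numbers): a SPARC MDStream made of an SEM
        # stream and a CL stream with a 50 x 50 repetition counts 1 px per
        # sub-stream per repetition, i.e. (1 + 1) * 50 * 50 = 5000 px.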

    MEMPP = 22  # bytes per pixel, found empirically

    @call_in_wx_main
    def _memory_check(self, _=None):
        """
        Makes an estimate for the amount of memory that will be consumed during
        stitching and compares it to the available memory on the computer.
        Displays a warning if the estimate exceeds the available memory.
        """
        if not self._dlg:  # Already destroyed? => no need to care
            return

        if self.stitch.value:
            # Number of pixels for acquisition
            pxs = sum(
                self._estimateStreamPixels(s)
                for s in self._get_acq_streams()[1])
            pxs *= self.nx.value * self.ny.value

            # Memory calculation
            mem_est = pxs * self.MEMPP
            mem_computer = psutil.virtual_memory().total
            logging.debug("Estimating %g GB needed, while %g GB available",
                          mem_est / 1024**3, mem_computer / 1024**3)
            # Assume computer is using 2 GB RAM for odemis and other programs
            mem_sufficient = mem_est < mem_computer - (2 * 1024**3)
        else:
            mem_sufficient = True

        # Display warning
        if mem_sufficient:
            self._dlg.setAcquisitionInfo(None)
        else:
            txt = (
                "Stitching this area requires %.1f GB of memory.\n"
                "Running the acquisition might cause your computer to crash." %
                (mem_est / 1024**3, ))
            self._dlg.setAcquisitionInfo(txt, lvl=logging.ERROR)
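        # Memory example (assumed numbers): stitching 25 tiles of 2048 x 2048 px at
        # MEMPP = 22 bytes/px needs about 25 * 2048**2 * 22 B ≈ 2.3 GB, so on an
        # 8 GB computer (minus the 2 GB margin) no warning would be shown.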

    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = self._tab.streambar_controller
        str_ctrl.pauseStreams()
        dlg.pauseSettings()
        self._unsubscribe_vas()

        orig_pos = main_data.stage.position.value
        trep = (self.nx.value, self.ny.value)
        nb = trep[0] * trep[1]
        # It's not a big deal if it was a bad guess as we'll use the actual data
        # before the first move
        sfov = self._guess_smallest_fov()
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        fn_bs, fn_ext = udataio.splitext(fn)

        ss, stitch_ss = self._get_acq_streams()
        end = self.estimate_time() + time.time()

        ft = model.ProgressiveFuture(end=end)
        self.ft = ft  # allows future to be canceled in show_dlg after closing window
        ft.running_subf = model.InstantaneousFuture()
        ft._task_state = RUNNING
        ft._task_lock = threading.Lock()
        ft.task_canceller = self._cancel_acquisition  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # For stitching only
        da_list = []  # for each position, a list of DataArrays
        i = 0
        prev_idx = [0, 0]
        try:
            for ix, iy in self._generate_scanning_indices(trep):
                logging.debug("Acquiring tile %dx%d", ix, iy)
                self._move_to_tile((ix, iy), orig_pos, sfov, prev_idx)
                prev_idx = ix, iy
                # Update the progress bar
                ft.set_progress(end=self.estimate_time(nb - i) + time.time())

                ft.running_subf = acqmng.acquire(
                    ss, self.main_app.main_data.settings_obs)
                das, e = ft.running_subf.result(
                )  # blocks until all the acquisitions are finished
                if e:
                    logging.warning(
                        "Acquisition for tile %dx%d partially failed: %s", ix,
                        iy, e)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                # TODO: do in a separate thread
                fn_tile = "%s-%.5dx%.5d%s" % (fn_bs, ix, iy, fn_ext)
                logging.debug("Will save data of tile %dx%d to %s", ix, iy,
                              fn_tile)
                exporter.export(fn_tile, das)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                if self.stitch.value:
                    # Sort tiles (largest sem on first position)
                    da_list.append(self.sort_das(das, stitch_ss))

                # Check the FoV is correct using the data, and if not update
                if i == 0:
                    sfov = self._check_fov(das, sfov)
                i += 1

            # Move stage to original position
            main_data.stage.moveAbs(orig_pos)

            # Stitch SEM and CL streams
            st_data = []
            if self.stitch.value and (not da_list or not da_list[0]):
                # if only AR or Spectrum are acquired
                logging.warning(
                    "No stream acquired that can be used for stitching.")
            elif self.stitch.value:
                logging.info("Acquisition completed, now stitching...")
                ft.set_progress(end=self.estimate_time(0) + time.time())

                logging.info("Computing big image out of %d images",
                             len(da_list))
                das_registered = stitching.register(da_list)

                # Select weaving method
                # On a SPARC system the mean weaver gives the best result since it
                # smooths the transitions between tiles. However, using this weaver on the
                # SECOM/DELPHI generates an image with dark stripes in the overlap regions,
                # which are the result of carbon decomposition effects that typically occur
                # in samples imaged by these systems. To mitigate this, we use the
                # collage_reverse weaver, which only shows the overlap region of the tile
                # that was imaged first.
                if self.microscope.role in ("secom", "delphi"):
                    weaving_method = WEAVER_COLLAGE_REVERSE
                    logging.info(
                        "Using weaving method WEAVER_COLLAGE_REVERSE.")
                else:
                    weaving_method = WEAVER_MEAN
                    logging.info("Using weaving method WEAVER_MEAN.")

                # Weave every stream
                if isinstance(das_registered[0], tuple):
                    for s in range(len(das_registered[0])):
                        streams = []
                        for da in das_registered:
                            streams.append(da[s])
                        da = stitching.weave(streams, weaving_method)
                        da.metadata[
                            model.MD_DIMS] = "YX"  # TODO: do it in the weaver
                        st_data.append(da)
                else:
                    da = stitching.weave(das_registered, weaving_method)
                    st_data.append(da)

                # Save
                exporter = dataio.find_fittest_converter(fn)
                if exporter.CAN_SAVE_PYRAMID:
                    exporter.export(fn, st_data, pyramid=True)
                else:
                    logging.warning(
                        "File format doesn't support saving image in pyramidal form"
                    )
                    exporter.export(fn, st_data)

            ft.set_result(None)  # Indicate it's over

            # End of the (completed) acquisition
            if ft._task_state == CANCELLED:
                raise CancelledError()
            dlg.Close()

            # Open analysis tab
            if st_data:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display stitched image")
                self.showAcquisition(fn)
            else:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display last tile")
                # It's easier to know the last filename, and it's also the most
                # interesting for the user: if something went wrong (e.g., focus),
                # it's the tile most likely to show it.
                self.showAcquisition(fn_tile)

            # TODO: also export a full image (based on reported position, or based
            # on alignment detection)
        except CancelledError:
            logging.debug("Acquisition cancelled")
            dlg.resumeSettings()
        except Exception as ex:
            logging.exception("Acquisition failed.")
            ft.running_subf.cancel()
            ft.set_result(None)
            # Show also in the window. It will be hidden next time a setting is changed.
            self._dlg.setAcquisitionInfo("Acquisition failed: %s" % (ex, ),
                                         lvl=logging.ERROR)
        finally:
            logging.info("Tiled acquisition ended")
            main_data.stage.moveAbs(orig_pos)
Example #17
class RGBCLIntensity(Plugin):
    name = "RGB CL-intensity"
    __version__ = "1.2"
    __author__ = u"Toon Coenen & Éric Piel"
    __license__ = "GNU General Public License 2"

    vaconf = OrderedDict((
        ("filter1", {
            "label": "Blue",
            "choices": util.format_band_choices,
        }),
        ("filter2", {
            "label": "Green",
            "choices": util.format_band_choices,
        }),
        ("filter3", {
            "label": "Red",
            "choices": util.format_band_choices,
        }),
        ("filename", {
            "control_type": odemis.gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("expectedDuration", {
        }),
    ))

    def __init__(self, microscope, main_app):
        super(RGBCLIntensity, self).__init__(microscope, main_app)

        # Can only be used on a SPARC with a CL-intensity detector
        if not microscope:
            return
        try:
            self.ebeam = model.getComponent(role="e-beam")
            self.cldetector = model.getComponent(role="cl-detector")
            self.filterwheel = model.getComponent(role="cl-filter")
            self.sed = model.getComponent(role="se-detector")
            # We could also check that the filter wheel has at least 3 filters, but
            # let's not be too picky: if the user has installed the plugin, they
            # probably want to use it anyway.
        except LookupError:
            logging.info("Hardware not found, cannot use the RGB CL plugin")
            return

        # The SEM survey and CLi stream (will be updated when showing the window)
        self._survey_s = None
        self._cl_int_s = None
        self._acqui_tab = main_app.main_data.getTabByName("sparc_acqui").tab_data_model

        # The settings to be displayed in the dialog
        # TODO: pick better default filters than first 3 filters
        # => based on the wavelengths fitting best RGB, or the names (eg, "Blue"),
        # and avoid "pass-through".
        fbchoices = self.filterwheel.axes["band"].choices
        if isinstance(fbchoices, dict):
            fbvalues = sorted(fbchoices.keys())
        else:
            fbvalues = fbchoices
        # FloatEnumerated because filter positions can be in rad (ie, not int positions)
        self.filter1 = model.FloatEnumerated(fbvalues[0],
                                             choices=fbchoices)
        self.filter2 = model.FloatEnumerated(fbvalues[min(1, len(fbvalues) - 1)],
                                             choices=fbchoices)
        self.filter3 = model.FloatEnumerated(fbvalues[min(2, len(fbvalues) - 1)],
                                             choices=fbchoices)

        self._filters = [self.filter1, self.filter2, self.filter3]
        self._colours = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]  # B, G, R

        self.filename = model.StringVA("a.tiff")
        self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)

        self.addMenu("Acquisition/RGB CL intensity...", self.start)

    def _read_config(self):
        """
        Updates the filter values based on the content of the config file.
        It will not fail if there is no config file or if the config file is incorrect;
        in the worst case, the filter values are simply not updated.
        """
        try:
            config = configparser.SafeConfigParser()  # Note: in Python 3, this is now also just called "ConfigParser"
            config.read(CONF_FILE)  # Returns empty config if no file
            for fname, va in zip(("blue", "green", "red"), self._filters):
                fval = config.getfloat("filters", fname)
                # Pick the same/closest value if it's available in the choices, always returns something valid
                va.value = odemis.util.find_closest(fval, va.choices)
                logging.debug("Updated %s to %s (from config %s)", fname, va.value, fval)

        except (configparser.NoOptionError, configparser.NoSectionError) as ex:
            logging.info("Config file is not existing or complete, no restoring filter values: %s", ex)
        except Exception:
            logging.exception("Failed to open the config file")

    def _write_config(self):
        """
        Store the filter values into the config file
        """
        try:
            config = configparser.SafeConfigParser()
            config.add_section("filters")
            config.set("filters", "blue", "%f" % self.filter1.value)
            config.set("filters", "green", "%f" % self.filter2.value)
            config.set("filters", "red", "%f" % self.filter3.value)

            with open(CONF_FILE, "w") as configfile:
                config.write(configfile)
        except Exception:
            logging.exception("Failed to save the config file")

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        at = self.estimateAcquisitionTime()

        # Use _set_value as it's read only
        self.expectedDuration._set_value(round(at), force_write=True)

    def _calc_acq_times(self):
        """
        Calculate exposure times for different elements of the acquisition.
        return (3 float): in s
        """
        dt_survey = 0
        dt_cl = 0
        dt_drift = 0

        if self._survey_s:
            dt_survey = self._survey_s.estimateAcquisitionTime()

        if self._cl_int_s:
            dt_cl = self._cl_int_s.estimateAcquisitionTime()

        # For each CL filter acquisition, the drift correction will run once
        # (*in addition* to the standard in-frame drift correction)
        dc = self._acqui_tab.driftCorrector
        if dc.roi.value != UNDEFINED_ROI:
            drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                    dc.roi.value, dc.dwellTime.value)
            dt_drift = drift_est.estimateAcquisitionTime() + 0.1

        return dt_survey, dt_cl, dt_drift

    def estimateAcquisitionTime(self):
        """
        Estimate the time it will take for the measurement.
        The number of pixels still has to be defined in the stream part
        """
        dt_survey, dt_cl, dt_drift = self._calc_acq_times()
        return dt_survey + len(self._filters) * (dt_cl + dt_drift)
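        # Illustrative example (assumed numbers): a 10 s survey, 30 s per CL filter
        # image and 1 s of drift correction per filter gives
        # 10 + 3 * (30 + 1) = 103 s in total.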

    def _get_new_filename(self):
        conf = get_acqui_conf()
        # Use TIFF by default, as it's a little bit more user-friendly for simple
        # coloured images.
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), ".tiff")
        )

    def _get_sem_survey(self):
        """
        Finds the SEM survey stream in the acquisition tab
        return (SEMStream or None): None if not found
        """
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            if isinstance(s, stream.SEMStream):
                return s

        logging.warning("No SEM survey stream found")
        return None

    def _get_cl_intensity(self):
        """
        Finds the CL intensity acquisition (aka MD) stream in the acquisition tab
        return (MultipleDetectorStream or None): None if not found
        """
        tab_data = self.main_app.main_data.tab.value.tab_data_model

        # Look for the MultiDetector stream which contains a CL intensity stream
        for mds in tab_data.acquisitionStreams:
            if not isinstance(mds, stream.MultipleDetectorStream):
                continue
            for ss in mds.streams:
                if isinstance(ss, stream.CLSettingsStream):
                    return mds

        logging.warning("No CL intensity stream found")
        return None

    def _pause_streams(self):
        """
        return (list of streams): the streams paused
        """
        try:
            str_ctrl = self.main_app.main_data.tab.value.streambar_controller
        except AttributeError:  # Odemis v2.6 and earlier versions
            str_ctrl = self.main_app.main_data.tab.value.stream_controller
        return str_ctrl.pauseStreams()

    def start(self):
        # Check the acquisition tab is open, and a CL-intensity stream is available
        ct = self.main_app.main_data.tab.value
        if ct.name == "sparc_acqui":
            cls = self._get_cl_intensity()
        else:
            cls = None
        if not cls:
            logging.info("Failed to start RGB CL intensity stream")
            dlg = wx.MessageDialog(self.main_app.main_frame,
                                   "No CL-intensity stream is currently open.\n"
                                   "You need to open a CL intensity stream "
                                   "and set the acquisition parameters.\n",
                                   caption="RGB CL intensity",
                                   style=wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
            return

        # Normally, since Odemis v3.1, all CLSettingsStreams on systems with a cl-filter
        # have a "local axis" as a VA "axisFilter".
        assert any(hasattr(s, "axisFilter") for s in cls.streams)

        self._pause_streams()

        self._read_config()  # Restore filter values from the config file

        # immediately switch optical path, to save time
        self.main_app.main_data.opm.setPath(cls)  # non-blocking

        # Get survey stream too
        self._survey_s = self._get_sem_survey()
        self._cl_int_s = cls

        self._update_exp_dur()

        # Create a window
        dlg = AcquisitionDialog(self, "RGB CL intensity acquisition",
                                "Acquires a RGB CL-intensity image\n"
                                "Specify the relevant settings and start the acquisition\n"
                                )

        self.filename.value = self._get_new_filename()
        dlg.addSettings(self, conf=self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Show the window, and wait until the acquisition is over
        ans = dlg.ShowModal()

        # The window is closed
        if ans == 0:
            logging.debug("RGB CL intensity acquisition cancelled")
        elif ans == 1:
            logging.debug("RGB CL intensity acquisition completed")
        else:
            logging.warning("Unknown return code %d", ans)

        self._write_config()  # Store the filter values to restore them on next time

        # Make sure we don't hold reference to the streams forever
        self._survey_s = None
        self._cl_int_s = None

        dlg.Destroy()

    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        self._pause_streams()

        # We use the acquisition CL intensity stream, so there is a concurrent
        # SEM acquisition (in addition to the survey). The drift correction is run
        # both during each acquisition and in-between acquisitions. The drift
        # between acquisitions is corrected by updating the metadata, so it's a
        # kind of post-processing compensation. The advantage is that it doesn't
        # affect the data, and if the entire field of view is imaged, it still
        # works properly. However, when the data is opened in other software (e.g.,
        # ImageJ), that compensation will not be applied automatically.
        # Alternatively, the images could be cropped to just the region which is
        # common to all the acquisitions, but there might then be data loss.
        # Note: the compensation could also be done by updating the ROI of the
        # CL stream. However, in the most common case, the user will acquire the
        # entire area, so drift compensation cannot be applied that way. We could
        # also use the SEM concurrent stream and measure the drift afterwards, but
        # that would double the dwell time.
        dt_survey, dt_clint, dt_drift = self._calc_acq_times()
        cl_set_s = next(s for s in self._cl_int_s.streams if hasattr(s, "axisFilter"))

        das = []
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Prepare the Future to represent the acquisition progress, and cancel
        dur = self.expectedDuration.value
        end = time.time() + dur
        ft = model.ProgressiveFuture(end=end)

        # Allow to cancel by cancelling also the sub-task
        def canceller(future):
            # To be absolutely correct, there should be a lock; however, in
            # practice, in the worst case the task will run a little longer before
            # stopping.
            if future._subf:
                logging.debug("Cancelling sub future %s", future._subf)
                return future._subf.cancel()

        ft._subf = None  # sub-future corresponding to the task currently happening
        ft.task_canceller = canceller  # To allow cancelling while it's running

        # Indicate the work is starting now
        ft.set_running_or_notify_cancel()
        dlg.showProgress(ft)

        try:
            # acquisition of SEM survey
            if self._survey_s:
                ft._subf = acqmng.acquire([self._survey_s], self.main_app.main_data.settings_obs)
                d, e = ft._subf.result()
                das.extend(d)
                if e:
                    raise e

            if ft.cancelled():
                raise CancelledError()

            dur -= dt_survey
            ft.set_progress(end=time.time() + dur)

            # Extra drift correction between each filter
            dc_roi = self._acqui_tab.driftCorrector.roi.value
            dc_dt = self._acqui_tab.driftCorrector.dwellTime.value

            # drift correction vector
            tot_dc_vect = (0, 0)
            if dc_roi != UNDEFINED_ROI:
                drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                                    dc_roi, dc_dt)
                drift_est.acquire()
                dur -= dt_drift
                ft.set_progress(end=time.time() + dur)
            else:
                drift_est = None

            # Loop over the filters; for now the number is fixed to 3, but this could be made flexible
            for fb, co in zip(self._filters, self._colours):
                cl_set_s.axisFilter.value = fb.value
                logging.debug("Using band %s", fb.value)
                ft.set_progress(end=time.time() + dur)

                # acquire CL stream
                ft._subf = acqmng.acquire([self._cl_int_s], self.main_app.main_data.settings_obs)
                d, e = ft._subf.result()
                if e:
                    raise e
                if ft.cancelled():
                    raise CancelledError()
                dur -= dt_clint
                ft.set_progress(end=time.time() + dur)

                if drift_est:
                    drift_est.acquire()
                    dc_vect = drift_est.estimate()
                    pxs = self.ebeam.pixelSize.value
                    tot_dc_vect = (tot_dc_vect[0] + dc_vect[0] * pxs[0],
                                   tot_dc_vect[1] - dc_vect[1] * pxs[1])  # Y is inverted in physical coordinates
                    dur -= dt_drift
                    ft.set_progress(end=time.time() + dur)

                # Convert the CL intensity stream into a "fluo" stream so that it's nicely displayed (in colour) in the viewer
                for da in d:
                    # Update the center position based on drift
                    pos = da.metadata[model.MD_POS]
                    logging.debug("Correcting position for drift by %s m", tot_dc_vect)
                    pos = tuple(p + dc for p, dc in zip(pos, tot_dc_vect))
                    da.metadata[model.MD_POS] = pos

                    if model.MD_OUT_WL not in da.metadata:
                        # check it's not the SEM concurrent stream
                        continue
                    # Force the colour, which forces it to be a FluoStream when
                    # opening it in the analysis tab, for nice colour merging.
                    da.metadata[model.MD_USER_TINT] = co

                das.extend(d)
                if ft.cancelled():
                    raise CancelledError()

            ft.set_result(None)  # Indicate it's over

        except CancelledError as ex:
            logging.debug("Acquisition cancelled")
            return
        except Exception as ex:
            logging.exception("Failure during RGB CL acquisition")
            ft.set_exception(ex)
            # TODO: show the error in the plugin window
            return

        if ft.cancelled() or not das:
            return

        logging.debug("Will save data to %s", fn)
        exporter.export(fn, das)
        self.showAcquisition(fn)
        dlg.Close()
Example #18
def ShowAcquisitionFileDialog(parent, filename):
    """
    parent (wxFrame): parent window
    filename (string): full filename to propose by default
    Note: updates the acquisition configuration if the user did pick a new file
    return (string or None): the new filename (or the None if the user cancelled)
    """
    conf = get_acqui_conf()

    # Find the available formats (and corresponding extensions)
    formats_to_ext = dataio.get_available_formats()

    # current filename
    path, base = os.path.split(filename)

    # Note: when setting 'defaultFile' while creating the file dialog, the
    #   extension of the first filter will automatically be added to the name.
    #   Since it cannot be changed by selecting a different file type, this is
    #   a big no-no. Also, extensions with multiple periods ('.') are not
    #   correctly handled. The solution is to use the SetFilename method instead.
    wildcards, formats = formats_to_wildcards(formats_to_ext)
    dialog = wx.FileDialog(parent,
                           message="Choose a filename and destination",
                           defaultDir=path,
                           defaultFile="",
                           style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
                           wildcard=wildcards)

    # Select the last format used
    prev_fmt = conf.last_format
    try:
        idx = formats.index(conf.last_format)
    except ValueError:
        idx = 0
    dialog.SetFilterIndex(idx)

    # Strip the extension, so that if the user changes the file format,
    # it will not have 2 extensions in a row.
    if base.endswith(conf.last_extension):
        base = base[:-len(conf.last_extension)]
    dialog.SetFilename(base)

    # Show the dialog and check whether it was accepted or cancelled
    if dialog.ShowModal() != wx.ID_OK:
        return None

    # New location and name have been selected...
    # Store the path
    path = dialog.GetDirectory()
    conf.last_path = path

    # Store the format
    fmt = formats[dialog.GetFilterIndex()]
    conf.last_format = fmt

    # Check the filename has a good extension, or add the default one
    fn = dialog.GetFilename()
    ext = None
    for extension in formats_to_ext[fmt]:
        if fn.endswith(extension) and len(extension) > len(ext or ""):
            ext = extension
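    # Illustrative example: if a format offers both ".tiff" and ".ome.tiff" and the
    # user typed "a.ome.tiff", both extensions match, and the loop keeps the longest
    # one so the extension is recognised and not duplicated.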

    if ext is None:
        if fmt == prev_fmt and conf.last_extension in formats_to_ext[fmt]:
            # if the format is the same (and the extension is compatible): keep
            # the extension. This avoids changing the extension if it's not
            # the default one.
            ext = conf.last_extension
        else:
            ext = formats_to_ext[fmt][0] # default extension
        fn += ext

    conf.last_extension = ext

    return os.path.join(path, fn)
Example #19
class ARspectral(Plugin):
    name = "AR/Spectral"
    __version__ = "2.5"
    __author__ = "Toon Coenen"
    __license__ = "GNU General Public License 2"

    vaconf = OrderedDict((
        ("stepsize", {
            "tooltip": "Distance between the center of each pixel",
            "scale": "log",
        }),
        ("res", {
            "control_type": odemis.gui.CONTROL_READONLY,
            "label": "repetition",
        }),
        (
            "roi",
            {
                "control_type":
                odemis.gui.CONTROL_NONE,  # TODO: CONTROL_READONLY to show it
            }),
        (
            "centerWavelength",
            {
                "control_type": odemis.gui.CONTROL_FLT,  # no slider
            }),
        ("grating", {}),
        (
            "slitWidth",
            {
                "control_type": odemis.gui.CONTROL_FLT,  # no slider
            }),
        ("dwellTime", {
            "tooltip": "Time spent by the e-beam on each pixel",
            "range": (1e-9, 360),
            "scale": "log",
        }),
        ("nDC", {
            "tooltip": "Number of drift corrections per pixel",
            "range": (1, 100),
            "label": "Drif cor. per pixel",
        }),
        ("binninghorz", {
            "label": "Hor. binning",
            "tooltip": "Horizontal binning of the CCD",
            "control_type": odemis.gui.CONTROL_RADIO,
        }),
        ("binningvert", {
            "label": "Ver. binning",
            "tooltip": "Vertical binning of the CCD",
            "control_type": odemis.gui.CONTROL_RADIO,
        }),
        ("cam_res", {
            "control_type": odemis.gui.CONTROL_READONLY,
            "label": "Camera resolution",
            "accuracy": None,
        }),
        ("gain", {}),
        ("readoutRate", {}),
        ("filename", {
            "control_type": odemis.gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards({hdf5.FORMAT:
                                              hdf5.EXTENSIONS})[0],
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        super(ARspectral, self).__init__(microscope, main_app)

        # Can only be used on a Sparc with a CCD
        if not microscope:
            return

        main_data = self.main_app.main_data
        self.ebeam = main_data.ebeam
        self.ccd = main_data.ccd
        self.sed = main_data.sed
        self.sgrh = main_data.spectrograph
        if not all((self.ebeam, self.ccd, self.sed, self.sgrh)):
            logging.debug("Hardware not found, cannot use the plugin")
            return

        # TODO: handle SPARC systems which don't have such hardware
        bigslit = model.getComponent(role="slit-in-big")
        lsw = model.getComponent(role="lens-switch")

        # This is a little tricky: we don't directly need the spectrometer (the
        # 1D image of the CCD), as we are interested in the raw image. However,
        # we care about the wavelengths, and the spectrometer might be inverted
        # in order to make sure the wavelength is in the correct direction (i.e.,
        # lowest pixel = lowest wavelength). So we need to do the same on the
        # raw image. However, there is no "official" way to connect the
        # spectrometer(s) to their raw CCD, so we rely on the fact that
        # typically this is a wrapper, and we can check it using .dependencies.
        wl_inverted = False
        try:
            spec = self._find_spectrometer(self.ccd)
        except LookupError as ex:
            logging.warning("%s, expect that the wavelengths are not inverted",
                            ex)
        else:
            # Found spec => check transpose in X (1 or -1), and invert if it's inverted (-1)
            try:
                wl_inverted = (spec.transpose[0] == -1)
            except Exception as ex:
                # Just in case spec has no .transpose or it's not a tuple
                # (very unlikely as all Detectors have it)
                logging.warning(
                    "%s: expect that the wavelengths are not inverted", ex)

        # the SEM survey stream (will be updated when showing the window)
        self._survey_s = None

        # Create a stream for AR spectral measurement
        self._ARspectral_s = SpectralARScanStream("AR Spectrum", self.ccd,
                                                  self.sed, self.ebeam,
                                                  self.sgrh, lsw, bigslit,
                                                  main_data.opm, wl_inverted)

        # For reading the ROA and anchor ROI
        self._tab = main_data.getTabByName("sparc_acqui")
        self._tab_data = self._tab.tab_data_model

        # The settings to be displayed in the dialog
        # Trick: we use the same VAs as the stream, so they are directly synchronised
        self.centerWavelength = self._ARspectral_s.centerWavelength
        #self.numberOfPixels = self._ARspectral_s.numberOfPixels
        self.dwellTime = self._ARspectral_s.dwellTime
        self.slitWidth = self._ARspectral_s.slitWidth
        self.binninghorz = self._ARspectral_s.binninghorz
        self.binningvert = self._ARspectral_s.binningvert
        self.nDC = self._ARspectral_s.nDC
        self.grating = model.IntEnumerated(
            self.sgrh.position.value["grating"],
            choices=self.sgrh.axes["grating"].choices,
            setter=self._onGrating)
        self.roi = self._ARspectral_s.roi
        self.stepsize = self._ARspectral_s.stepsize
        self.res = model.TupleVA((1, 1), unit="px")
        self.cam_res = model.TupleVA((self.ccd.shape[0], self.ccd.shape[1]),
                                     unit="px")
        self.gain = self.ccd.gain
        self.readoutRate = self.ccd.readoutRate
        self.filename = model.StringVA("a.h5")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)

        # Update the expected duration when values change; it depends on both the dwell time and the number of pixels
        self.dwellTime.subscribe(self._update_exp_dur)
        self.stepsize.subscribe(self._update_exp_dur)
        self.nDC.subscribe(self._update_exp_dur)
        self.readoutRate.subscribe(self._update_exp_dur)
        self.cam_res.subscribe(self._update_exp_dur)

        # Subscribe to update X/Y res
        self.stepsize.subscribe(self._update_res)
        self.roi.subscribe(self._update_res)
        # Subscribe to the binning values for the camera res
        self.binninghorz.subscribe(self._update_cam_res)
        self.binningvert.subscribe(self._update_cam_res)

        self.addMenu("Acquisition/AR Spectral...", self.start)

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        at = self._ARspectral_s.estimateAcquisitionTime()

        if self._survey_s:
            at += self._survey_s.estimateAcquisitionTime()

        # Use _set_value as it's read only
        self.expectedDuration._set_value(round(at), force_write=True)

    def _update_res(self, _=None):
        """
        Update the scan resolution based on the step size
        """

        sem_width = (self.ebeam.shape[0] * self.ebeam.pixelSize.value[0],
                     self.ebeam.shape[1] * self.ebeam.pixelSize.value[1])
        ROI = self.roi.value
        if ROI == UNDEFINED_ROI:
            ROI = (0, 0, 1, 1)
        logging.info("ROI = %s", ROI)
        stepsize = self.stepsize.value

        # Resolution values rounded down; make sure they never end up at 0
        xres = ((ROI[2] - ROI[0]) * sem_width[0]) // stepsize
        yres = ((ROI[3] - ROI[1]) * sem_width[1]) // stepsize
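        # e.g. a ROI covering half of a 100 µm wide SEM field scanned with a
        # 1 µm step size gives roughly 50 px in X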

        if xres == 0:
            xres = 1
        if yres == 0:
            yres = 1
        self.res.value = (int(xres), int(yres))

    def _update_cam_res(self, _=None):
        """
        Update spectral camera resolution based on the binning
        """
        cam_xres = self.ccd.shape[0] // self.binninghorz.value
        cam_yres = self.ccd.shape[1] // self.binningvert.value

        self.cam_res.value = (int(cam_xres), int(cam_yres))

    def _onGrating(self, grating):
        """
        Called when the grating VA is changed
        return (int): the actual grating, once the move is over
        """
        f = self.sgrh.moveAbs({"grating": grating})
        f.result()  # wait for the move to finish
        return grating

    def _get_new_filename(self):
        conf = get_acqui_conf()
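        # Builds a timestamped file name, e.g. "20240314-153045.h5"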
        return os.path.join(conf.last_path,
                            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), ".h5"))

    def _get_sem_survey(self):
        """
        Finds the SEM survey stream in the acquisition tab
        return (SEMStream or None): None if not found
        """
        for s in self._tab_data.streams.value:
            if isinstance(s, stream.SEMStream):
                return s

        logging.warning("No SEM survey stream found")
        return None

    def _find_spectrometer(self, detector):
        """
        Find a spectrometer which wraps the given detector
        return (Detector): the spectrometer
        raise LookupError: if nothing found.
        """
        for spec in self.main_app.main_data.spectrometers:
            # Check by name as the components are actually Pyro proxies, which
            # might not be equal even if they point to the same component.
            if (model.hasVA(spec, "dependencies")
                    and detector.name in (d.name
                                          for d in spec.dependencies.value)):
                return spec

        raise LookupError("No spectrometer corresponding to %s found" %
                          (detector.name, ))

    def start(self):
        if self.main_app.main_data.tab.value.name != "sparc_acqui":
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "AR spectral acquisition must be done from the acquisition tab.",
                "AR spectral acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # get region and dwelltime for drift correction
        self._ARspectral_s.dcRegion.value = self._tab_data.driftCorrector.roi.value
        self._ARspectral_s.dcDwellTime.value = self._tab_data.driftCorrector.dwellTime.value

        # Update the grating position to its current position
        self.grating.value = self.sgrh.position.value["grating"]

        # get survey
        self._survey_s = self._get_sem_survey()

        # For ROI:
        roi = self._tab_data.semStream.roi.value
        if roi == UNDEFINED_ROI:
            roi = (0, 0, 1, 1)
        self.roi.value = roi
        logging.debug("ROA = %s", self.roi.value)

        self._update_exp_dur()
        self._update_res()
        self._update_cam_res()

        # Create a window
        dlg = AcquisitionDialog(
            self, "AR Spectral acquisition",
            "Acquires a hyperspectral AR CL image\n"
            "Specify the relevant settings and start the acquisition\n")

        self.filename.value = self._get_new_filename()
        dlg.addSettings(self, conf=self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Show the window, and wait until the acquisition is over
        ans = dlg.ShowModal()

        # The window is closed
        if ans == 0:
            logging.info("AR spectral acquisition cancelled")
        elif ans == 1:
            logging.info("AR spectral acquisition completed")
        else:
            logging.debug("Unknown return code %d", ans)

        dlg.Destroy()

    def acquire(self, dlg):
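        """
        Callback of the "Acquire" button: acquires the SEM survey (if present)
        and the AR spectral stream, exports the data to the selected file, and
        closes the dialog.
        dlg (AcquisitionDialog): the dialog which requested the acquisition
        """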
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        str_ctrl = self._tab.streambar_controller
        stream_paused = str_ctrl.pauseStreams()

        strs = []
        if self._survey_s:
            strs.append(self._survey_s)

        strs.append(self._ARspectral_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        try:
            f = acqmng.acquire(strs, self.main_app.main_data.settings_obs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass

        if not f.cancelled() and das:
            if e:
                logging.warning("AR spectral scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)

        dlg.Close()
Example #20
0
    def test_formats_to_wildcards(self):
        inp = {"HDF5": [".h5", ".hdf5"]}
        exp_out = ("HDF5 files (*.h5;*.hdf5)|*.h5;*.hdf5",
                   ["HDF5"])
        out = formats_to_wildcards(inp)
        self.assertEqual(out, exp_out)
Example #21
0
class TimelapsePlugin(Plugin):
    name = "Timelapse"
    __version__ = "2.2"
    __author__ = u"Éric Piel"
    __license__ = "Public domain"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("period", {
            "tooltip": "Time between each acquisition",
            "scale": "log",
        }),
        ("numberOfAcquisitions", {
            "control_type": odemis.gui.CONTROL_INT,  # no slider
        }),
        ("semOnlyOnLast", {
            "label": "SEM only on the last",
            "tooltip": "Acquire SEM images only once, after the timelapse",
            "control_type": odemis.gui.CONTROL_NONE,  # hidden by default
        }),
        ("filename", {
            "control_type": odemis.gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        super(TimelapsePlugin, self).__init__(microscope, main_app)
        # Can only be used with a microscope
        if not microscope:
            return

        self.period = model.FloatContinuous(10, (1e-3, 10000),
                                            unit="s",
                                            setter=self._setPeriod)
        # TODO: prevent period < acquisition time of all streams
        self.numberOfAcquisitions = model.IntContinuous(100, (2, 100000))
        self.semOnlyOnLast = model.BooleanVA(False)
        self.filename = model.StringVA("a.h5")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)

        self.period.subscribe(self._update_exp_dur)
        self.numberOfAcquisitions.subscribe(self._update_exp_dur)

        # On SECOM/DELPHI/ENZEL, propose to only acquire the SEM at the end
        if microscope.role in ("secom", "delphi", "enzel"):
            self.vaconf["semOnlyOnLast"]["control_type"] = odemis.gui.CONTROL_CHECK

        self._dlg = None
        self.addMenu("Acquisition/Timelapse...\tCtrl+T", self.start)

        self._to_store = queue.Queue()  # queue of tuples (str, [DataArray]) for saving data
        self._sthreads = []  # the saving threads
        self._exporter = None  # dataio exporter to use

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension))

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        nb = self.numberOfAcquisitions.value
        p = self.period.value
        ss, last_ss = self._get_acq_streams()

        sacqt = acqmng.estimateTime(ss)
        logging.debug("Estimating %g s acquisition for %d streams", sacqt,
                      len(ss))
        intp = max(0, p - sacqt)

        dur = sacqt * nb + intp * (nb - 1)
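        # e.g. 3 acquisitions of 2 s each with a 10 s period: intp = 8 s and
        # dur = 2 * 3 + 8 * 2 = 22 s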
        if last_ss:
            dur += acqmng.estimateTime(ss + last_ss) - sacqt

        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(dur), force_write=True)

    def _setPeriod(self, period):
        # It should be at least as long as the acquisition time of all the streams
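        # e.g. a requested period of 1 s with 2.5 s of total acquisition time
        # is clamped up to 2.5 s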
        tot_time = 0
        for s in self._get_acq_streams()[0]:
            acqt = s.estimateAcquisitionTime()
            # Normally we round-up in order to be pessimistic on the duration,
            # but here it's better to be a little optimistic and allow the user
            # to pick a really short period (if each stream has a very short
            # acquisition time).
            acqt = max(1e-3, acqt - Stream.SETUP_OVERHEAD)
            tot_time += acqt

        return min(max(tot_time, period), self.period.range[1])

    def _get_live_streams(self, tab_data):
        """
        Return all the live streams present in the given tab
        """
        ss = list(tab_data.streams.value)

        # On the SPARC, there is a Spot stream, which we don't need for live
        if hasattr(tab_data, "spotStream"):
            try:
                ss.remove(tab_data.spotStream)
            except ValueError:
                pass  # spotStream was not there anyway

        # Discard the static streams, which hold only already-acquired data
        ss = [s for s in ss if not isinstance(s, StaticStream)]
        return ss

    def _get_acq_streams(self):
        """
        Return the streams that should be used for acquisition
        return:
           acq_st (list of streams): the streams to be acquired at every repetition
           last_st (list of streams): streams to be acquired at the end
        """
        if not self._dlg:
            return [], []

        live_st = (self._dlg.view.getStreams() +
                   self._dlg.hidden_view.getStreams())
        logging.debug("View has %d streams", len(live_st))

        # On the SPARC, the acquisition streams are not the same as the live
        # streams. On the SECOM/DELPHI, they are the same (for now)
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        if hasattr(tab_data, "acquisitionStreams"):
            acq_st = tab_data.acquisitionStreams
            # On ENZEL/METEOR, acquisitionStreams is a ListVA (instead of a set)
            if isinstance(acq_st, model.VigilantAttribute):
                acq_st = acq_st.value

            # Discard the acquisition streams which are not visible
            ss = []
            for acs in acq_st:
                if isinstance(acs, stream.MultipleDetectorStream):
                    if any(subs in live_st for subs in acs.streams):
                        ss.append(acs)
                        break
                elif acs in live_st:
                    ss.append(acs)
        else:
            # No special acquisition streams
            ss = live_st

        last_ss = []
        if self.semOnlyOnLast.value:
            last_ss = [s for s in ss if isinstance(s, stream.EMStream)]
            ss = [s for s in ss if not isinstance(s, stream.EMStream)]

        return ss, last_ss

    def start(self):
        # Fail if the live tab is not selected
        tab = self.main_app.main_data.tab.value
        if tab.name not in ("secom_live", "sparc_acqui",
                            "cryosecom-localization"):
            available_tabs = self.main_app.main_data.tab.choices.values()
            exp_tab_name = "localization" if "cryosecom-localization" in available_tabs else "acquisition"
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "Timelapse acquisition must be done from the %s tab." %
                (exp_tab_name, ), "Timelapse acquisition not possible",
                wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # On SPARC, fail if no ROI selected
        try:
            if tab.tab_data_model.semStream.roi.value == UNDEFINED_ROI:
                box = wx.MessageDialog(
                    self.main_app.main_frame,
                    "You need to select a region of acquisition.",
                    "Timelapse acquisition not possible", wx.OK | wx.ICON_STOP)
                box.ShowModal()
                box.Destroy()
                return
        except AttributeError:
            pass  # Not a SPARC

        # Stop the stream(s) playing to not interfere with the acquisition
        tab.streambar_controller.pauseStreams()

        self.filename.value = self._get_new_filename()
        dlg = AcquisitionDialog(
            self, "Timelapse acquisition",
            "The same streams will be acquired multiple times, defined by the 'number of acquisitions'.\n"
            "The time separating each acquisition is defined by the 'period'.\n"
        )
        self._dlg = dlg
        dlg.addSettings(self, self.vaconf)
        ss = self._get_live_streams(tab.tab_data_model)
        for s in ss:
            if isinstance(s, (ARStream, SpectrumStream, MonochromatorSettingsStream)):
                # TODO: instead of hard-coding the list, a way to detect the type
                # of live image?
                logging.info("Not showing stream %s, for which the live image is not spatial", s)
                dlg.addStream(s, index=None)
            else:
                dlg.addStream(s)
        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Force to re-check the minimum period time
        self.period.value = self.period.value

        # Update acq time when streams are added/removed
        dlg.view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        dlg.hidden_view.stream_tree.flat.subscribe(self._update_exp_dur,
                                                   init=True)
        # TODO: update the acquisition time whenever a setting changes

        # TODO: disable "acquire" button if no stream selected

        # TODO: also display the repetition and axis settings for the SPARC streams.

        ans = dlg.ShowModal()

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        dlg.view.stream_tree.flat.unsubscribe(self._update_exp_dur)

        dlg.Destroy()

    # Functions to handle the storage of the data in parallel threads

    def _saving_thread(self, i):
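        """
        Runs until a (None, None) "quit" message is received: takes
        (filename, DataArrays) tuples from the queue and exports them.
        i (int): thread number, only used for logging
        """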
        try:
            while True:
                fn, das = self._to_store.get()
                if fn is None:
                    self._to_store.task_done()
                    return
                logging.info("Saving data %s in thread %d", fn, i)
                self._exporter.export(fn, das)
                self._to_store.task_done()
        except Exception:
            logging.exception("Failure in the saving thread")
        finally:
            logging.debug("Saving thread %d done", i)

    def _start_saving_threads(self, n=4):
        """
        n (int >= 1): number of threads
        """
        if self._sthreads:
            logging.warning(
                "The previous saving threads were not stopped, stopping now")
            self._stop_saving_threads()

        for i in range(n):
            t = threading.Thread(target=self._saving_thread, args=(i, ))
            t.start()
            self._sthreads.append(t)

    def _stop_saving_threads(self):
        """
        Blocks until all the data has been stored
        Can be called multiple times in a row
        """
        # Indicate to all the threads that they should stop
        for _ in self._sthreads:
            self._to_store.put((None, None))  # Special "quit" message for each thread

        # Wait for all the threads to complete
        self._to_store.join()
        for t in self._sthreads:
            t.join()
        self._sthreads = []

    def _save_data(self, fn, das):
        """
        Queue the requested DataArrays to be stored in the given file
        """
        self._to_store.put((fn, das))

    def acquire(self, dlg):
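        """
        Callback of the "Acquire" button: pauses the streams, starts the saving
        threads, and runs the timelapse (fast single-stream mode if possible,
        otherwise one acquisition per period).
        dlg (AcquisitionDialog): the dialog which requested the acquisition
        """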
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        self._start_saving_threads(4)

        ss, last_ss = self._get_acq_streams()
        sacqt = acqmng.estimateTime(ss)
        p = self.period.value
        nb = self.numberOfAcquisitions.value

        try:
            # If the user just wants to acquire as fast as possible, and there
            # is a single stream, we can use an optimised version
            if (len(ss) == 1 and isinstance(ss[0], LiveStream) and nb >= 2
                    and sacqt < 5 and p < sacqt + Stream.SETUP_OVERHEAD):
                logging.info("Fast timelapse detected, will acquire as fast as possible")
                self._fast_acquire_one(dlg, ss[0], last_ss)
            else:
                self._acquire_multi(dlg, ss, last_ss)
        finally:
            # Make sure the threads are stopped even in case of error
            self._stop_saving_threads()

        # self.showAcquisition(self.filename.value)

        logging.debug("Closing dialog")
        dlg.Close()

    def _fast_acquire_one(self, dlg, st, last_ss):
        """
        Acquires one stream, *as fast as possible* (ie, the period is not used).
        Only works with LiveStreams (and not with MDStreams)
        st (LiveStream)
        last_ss (list of Streams): all the streams to be acquired at the end
        """
        # Essentially, we trick the stream a little bit, by convincing it that
        # we want a live view, but instead of displaying the data, we store it.
        # It's much faster because we don't have to stop/start the detector between
        # each acquisition.
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        self._acq_completed = threading.Event()

        f = model.ProgressiveFuture()
        f.task_canceller = self._cancel_fast_acquire
        f._stream = st
        if last_ss:
            nb -= 1
            extra_dur = acqmng.estimateTime([st] + last_ss)
        else:
            extra_dur = 0
        self._hijack_live_stream(st, f, nb, fn_pat, extra_dur)

        try:
            # Start acquisition and wait until it's done
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)
            st.is_active.value = True
            self._acq_completed.wait()

            if f.cancelled():
                dlg.resumeSettings()
                return
        finally:
            st.is_active.value = False  # just to be extra sure it's stopped
            logging.debug("Restoring stream %s", st)
            self._restore_live_stream(st)

        # last "normal" acquisition, if needed
        if last_ss:
            logging.debug("Acquiring last acquisition, with all the streams")
            ss = [st] + last_ss
            f.set_progress(end=time.time() + acqmng.estimateTime(ss))
            das, e = acqmng.acquire(
                ss, self.main_app.main_data.settings_obs).result()
            self._save_data(fn_pat % (nb, ), das)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over

    def _cancel_fast_acquire(self, f):
        f._stream.is_active.value = False
        self._acq_completed.set()
        return True

    def _hijack_live_stream(self, st, f, nb, fn_pat, extra_dur=0):
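        # Temporarily disable the histogram updates, so no time is wasted on
        # them between frames (restored in _restore_live_stream())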
        st._old_shouldUpdateHistogram = st._shouldUpdateHistogram
        st._shouldUpdateHistogram = lambda: None
        self._data_received = 0

        dur_one = st.estimateAcquisitionTime() - Stream.SETUP_OVERHEAD

        # Function that will be called after each new raw data has been received
        def store_raw_data():
            i = self._data_received
            self._data_received += 1
            logging.debug("Received data %d", i)
            if self._data_received == nb:
                logging.debug("Stopping the stream")
                st.is_active.value = False
                self._acq_completed.set()
            elif self._data_received > nb:
                # Sometimes it goes too fast, and an extra frame is received
                logging.debug("Skipping extra data")
                return

            self._save_data(fn_pat % (i, ), [st.raw[0]])

            # Update progress bar
            left = nb - i
            dur = dur_one * left + extra_dur
            f.set_progress(end=time.time() + dur)

        st._old_shouldUpdateImage = st._shouldUpdateImage
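        # store_raw_data() now takes the place of the "new image" callback, so
        # each raw frame is queued for saving instead of becoming a live image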
        st._shouldUpdateImage = store_raw_data

    def _restore_live_stream(self, st):
        st._shouldUpdateImage = st._old_shouldUpdateImage
        del st._old_shouldUpdateImage
        st._shouldUpdateHistogram = st._old_shouldUpdateHistogram
        del st._old_shouldUpdateHistogram

    def _acquire_multi(self, dlg, ss, last_ss):
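        """
        Acquires all the streams the requested number of times, waiting so that
        each acquisition starts one period after the previous one, and queues
        the data for saving.
        """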
        p = self.period.value
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        sacqt = acqmng.estimateTime(ss)
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but the requested period between "
                "acquisitions is only %g s", sacqt, p)

        # TODO: if drift correction, use it over all the time

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            if left == 1 and last_ss:
                ss += last_ss
                dur += acqmng.estimateTime(ss) - sacqt

            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acqmng.acquire(
                ss, self.main_app.main_data.settings_obs).result()
            if f.cancelled():
                dlg.resumeSettings()
                return

            self._save_data(fn_pat % (i, ), das)

            # Wait for the requested period, except after the last acquisition
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info(
                        "Immediately starting next acquisition, %g s late",
                        -sleept)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over