def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # Background data
        dbckg = numpy.array([1, 2, 2, 3, 4, 5, 4, 6, 9], dtype=numpy.uint16)
        dbckg.shape += (1, 1, 1, 1)
        wl_calib = 400e-9 + numpy.arange(dbckg.shape[0]) * 10e-9
        bckg = model.DataArray(dbckg, metadata={model.MD_WL_LIST: wl_calib})

        # Give one DA, the correct one, so expect to get it back

        # Compensation data
        dcalib = numpy.array([1, 1.3, 2, 3.5, 4, 5, 0.1, 6, 9.1], dtype=numpy.float64)
        dcalib.shape = (dcalib.shape[0], 1, 1, 1, 1)
        wl_calib = 400e-9 + numpy.arange(dcalib.shape[0]) * 10e-9
        calib = model.DataArray(dcalib, metadata={model.MD_WL_LIST: wl_calib})

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230), dtype=numpy.uint16))
        data2 = model.DataArray(numpy.zeros((3, 1, 1, 520, 230), dtype=numpy.uint16))

        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_coef = [data1, calib, data2]
        full_bckg = [data1, bckg, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn_coef = u"test_spec" + exporter.EXTENSIONS[0]
            exporter.export(fn_coef, full_coef, thumb)
            fn_bckg = u"test_bckg" + exporter.EXTENSIONS[0]
            exporter.export(fn_bckg, full_bckg, thumb)

            if fmt in dataio.get_available_formats(os.O_RDONLY):
                data_bckg = exporter.read_data(fn_bckg)
                ibckg = calibration.get_spectrum_data(data_bckg)
                data_coef = exporter.read_data(fn_coef)
                icoef = calibration.get_spectrum_efficiency(data_coef)
                numpy.testing.assert_equal(icoef, calib)
                numpy.testing.assert_almost_equal(icoef.metadata[model.MD_WL_LIST],
                                                  calib.metadata[model.MD_WL_LIST])
                numpy.testing.assert_equal(ibckg, bckg)
                numpy.testing.assert_almost_equal(ibckg.metadata[model.MD_WL_LIST],
                                                  bckg.metadata[model.MD_WL_LIST])
            try:
                os.remove(fn_coef)
            except OSError:
                logging.exception("Failed to delete the file %s", fn_coef)
            try:
                os.remove(fn_bckg)
            except OSError:
                logging.exception("Failed to delete the file %s", fn_bckg)
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # AR background data
        dcalib = numpy.zeros((512, 1024), dtype=numpy.uint16)
        md = {model.MD_SW_VERSION: "1.0-test",
             model.MD_HW_NAME: "fake ccd",
             model.MD_DESCRIPTION: "AR",
             model.MD_ACQ_DATE: time.time(),
             model.MD_BPP: 12,
             model.MD_BINNING: (1, 1), # px, px
             model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
             model.MD_PIXEL_SIZE: (1e-6, 2e-5), # m/px
             model.MD_POS: (1.2e-3, -30e-3), # m
             model.MD_EXP_TIME: 1.2, # s
             model.MD_AR_POLE: (253.1, 65.1),
             model.MD_LENS_MAG: 60, # ratio
            }
        calib = model.DataArray(dcalib, md)

        # Give one DA, the correct one, so expect to get it back
        out = calibration.get_ar_data([calib])
        numpy.testing.assert_equal(out, calib)

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        data2 = model.DataArray(17 * numpy.ones((1, 1), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_data = [data1, calib, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn = u"test_ar" + exporter.EXTENSIONS[0]
            exporter.export(fn, full_data, thumb)

            if fmt in dataio.get_available_formats(os.O_RDONLY):
                idata = exporter.read_data(fn)
                icalib = calibration.get_ar_data(idata)
                icalib2d = img.ensure2DImage(icalib)
                numpy.testing.assert_equal(icalib2d, calib)
                numpy.testing.assert_almost_equal(icalib.metadata[model.MD_AR_POLE],
                                                  calib.metadata[model.MD_AR_POLE])
            try:
                os.remove(fn)
            except OSError:
                logging.exception("Failed to delete the file %s", fn)
Example #3
    def test_get_available_formats(self):
        for mode in [os.O_RDONLY, os.O_WRONLY, os.O_RDWR]:
            fmts = get_available_formats(mode)
            self.assertGreaterEqual(len(dataio.__all__), len(fmts))

            for fmt, exts in fmts.items():
                for ext in exts:
                    self.assertTrue(ext.startswith("."),
                            "extension '%s' doesn't start with a dot" % ext)

        # including lossy formats
        all_fmts = get_available_formats(os.O_RDWR, allowlossy=True)
        self.assertEqual(len(dataio.__all__), len(all_fmts))
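    # Illustration (hypothetical values): the assertions above rely on
    # get_available_formats() returning a dict of format name -> extension list,
    # e.g. {"TIFF": [".ome.tiff", ".tiff", ".tif"], "HDF5": [".h5", ".hdf5"]},
    # where every extension starts with a dot.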
Example #4
    def _get_snapshot_info(self, dialog=False):
        config = conf.get_acqui_conf()

        tab, filepath, exporter = self._main_data_model.tab.value, None, None

        if dialog:
            format_info = get_available_formats()
            wildcards, formats = formats_to_wildcards(format_info)
            # The default file name should be empty because otherwise the
            # dialog will add an extension that won't change when the user
            # selects a different file type in the dialog.
            dlg = wx.FileDialog(self._main_frame,
                                "Save Snapshot",
                                config.last_path,
                                "",
                                wildcard=wildcards,
                                style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)

            # Select the last format used
            try:
                idx = formats.index(config.last_format)
            except ValueError:
                idx = 0
            dlg.SetFilterIndex(idx)

            if dlg.ShowModal() == wx.ID_OK:
                path = dlg.GetPath()
                fmt = formats[dlg.GetFilterIndex()]
                extension = format_info[fmt][0]

                # Prevent double extensions when an old file is selected
                filepath, _ = os.path.splitext(path)
                filepath = filepath + extension
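                # e.g. (hypothetical): picking an existing "scan.tiff" while the
                # HDF5 filter is selected yields "scan.h5", not "scan.tiff.h5"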

                config.last_path = os.path.dirname(path)
                config.last_format = fmt
                config.last_extension = extension
                config.write()
                exporter = dataio.get_exporter(config.last_format)

            dlg.Destroy()
        else:
            extension = config.last_extension
            dirname = get_picture_folder()
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())
            filepath = os.path.join(dirname, basename + extension)
            exporter = dataio.get_exporter(config.last_format)

            if os.path.exists(filepath):
                msg = "File '%s' already exists, cancelling snapshot"
                logging.warning(msg, filepath)
                tab, filepath, exporter = None, None, None

        return tab, filepath, exporter
Example #5
    def open_image(self, dlg):
        tab = self.main_app.main_data.getTabByName("analysis")
        tab_data = tab.tab_data_model
        fi = tab_data.acq_fileinfo.value

        if fi and fi.file_name:
            path, _ = os.path.split(fi.file_name)
        else:
            config = get_acqui_conf()
            path = config.last_path

        # Find the available formats (and corresponding extensions)
        formats_to_ext = dataio.get_available_formats(os.O_RDONLY)
        wildcards, formats = guiutil.formats_to_wildcards(formats_to_ext, include_all=True)
        dialog = wx.FileDialog(dlg,
                               message="Choose a file to load",
                               defaultDir=path,
                               defaultFile="",
                               style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
                               wildcard=wildcards)

        # Show the dialog and check whether it was accepted or cancelled
        if dialog.ShowModal() != wx.ID_OK:
            return None

        # Detect the format to use
        filename = dialog.GetPath()

        data = udataio.open_acquisition(filename)[0]
        try:
            data = self._ensureGrayscale(data)
        except ValueError as ex:
            box = wx.MessageDialog(dlg, str(ex), "Failed to open image",
                                   wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return None

        self.crop_top.range = (0, data.shape[0] // 2)
        self.crop_bottom.range = (0, data.shape[0] // 2)
        self.crop_left.range = (0, data.shape[1] // 2)
        self.crop_right.range = (0, data.shape[1] // 2)

        data.metadata[model.MD_POS] = (0, 0)
        data.metadata[model.MD_PIXEL_SIZE] = (1e-9, 1e-9)

        basename = os.path.splitext(os.path.split(filename)[1])[0]
        return stream.StaticSEMStream(basename, data)
Example #6
    def _get_export_info(self):
        """
        Return str, str, str: full filename, exporter name, export type
          Full filename is None if cancelled by user
        """
        # Set default to the first of the list
        view = self._data_model.focussedView.value
        export_type = self.get_export_type(view)
        formats = EXPORTERS[export_type]
        if self._conf.export_raw:
            default_exporter = get_converter(formats[1][0])
        else:
            default_exporter = get_converter(formats[0][0])
        extension = default_exporter.EXTENSIONS[0]

        # Suggested name = current file name + stream/view name + extension of the default format
        fi = self._data_model.acq_fileinfo.value
        if fi is not None and fi.file_name:
            basename = os.path.basename(fi.file_name)
            # Remove the extension
            formats_to_ext = dataio.get_available_formats()
            all_exts = sum(formats_to_ext.values(), [])
            fexts = sorted((ext for ext in all_exts if basename.endswith(ext)),
                           key=lambda s: len(s))
            if fexts:
                # Remove the biggest extension
                basename = basename[:-len(fexts[-1])]
            else:
                # Try to remove whichever extension there is
                basename, _ = os.path.splitext(basename)
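            # e.g. (hypothetical): "cell3.ome.tiff" -> "cell3", because ".ome.tiff"
            # is the longest known extension matching the end of the name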

            # Use stream name, if there is just one stream, otherwise use the view name
            streams = view.getStreams()
            if len(streams) == 1:
                basename += " " + streams[0].name.value
            else:
                # TODO: remove numbers from the view name?
                basename += " " + view.name.value
        else:
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())

        filepath = os.path.join(self._conf.last_export_path, basename + extension)

        # filepath will be None if cancelled by user
        return self.ShowExportFileDialog(filepath, default_exporter)
Example #7
def splitext(path):
    """
    Split a pathname into basename + ext (.XXX).
    Does pretty much the same as os.path.splitext, but handles "double" extensions
    like ".ome.tiff".
    """
    root, ext = os.path.splitext(path)

    # See if there is a longer extension in the known formats
    fmts = dataio.get_available_formats(mode=os.O_RDWR, allowlossy=True)
    # Note, this one-liner also works, but brain-teasers are not good code:
    # max((fe for fes in fmts.values() for fe in fes if path.endswith(fe)), key=len)
    for fmtexts in fmts.values():
        for fmtext in fmtexts:
            if path.endswith(fmtext) and len(fmtext) > len(ext):
                ext = fmtext

    root = path[:len(path) - len(ext)]
    return root, ext
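# Usage sketch (hypothetical paths, assuming ".ome.tiff" is among the registered
# extensions):
#   splitext("/tmp/acq-01.ome.tiff")         -> ("/tmp/acq-01", ".ome.tiff")
#   os.path.splitext("/tmp/acq-01.ome.tiff") -> ("/tmp/acq-01.ome", ".tiff")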
Example #8
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    # arguments handling
    parser = argparse.ArgumentParser(description="File format conversion utility")

    parser.add_argument('--version', dest="version", action='store_true',
                        help="show program's version number and exit")
    parser.add_argument("--input", "-i", dest="input",
                        help="name of the input file")
    parser.add_argument("--tiles", "-t", dest="tiles", nargs="+",
                        help="list of files acquired in tiles to re-assemble")
    parser.add_argument("--effcomp", dest="effcomp",
                        help="name of a spectrum efficiency compensation table (in CSV format)")
    fmts = dataio.get_available_formats(os.O_WRONLY)
    parser.add_argument("--output", "-o", dest="output",
            help="name of the output file. "
            "The file format is derived from the extension (%s are supported)." %
            (" and ".join(fmts)))
    # TODO: automatically select pyramidal format if image > 4096px?
    parser.add_argument("--pyramid", "-p", dest="pyramid", action='store_true',
                        help="Export the data in pyramidal format. "
                        "It takes about 2x more space, but allows to visualise large images. "
                        "Currently, only the TIFF format supports this option.")
    parser.add_argument("--minus", "-m", dest="minus", action='append',
            help="name of an acquisition file whose data is subtracted from the input file.")
    parser.add_argument("--weaver", "-w", dest="weaver",
            help="name of weaver to be used during stitching. Options: 'mean': MeanWeaver " 
            "(blend overlapping regions of adjacent tiles), 'collage': CollageWeaver "
            "(paste tiles as-is at calculated position)", choices=("mean", "collage", "collage_reverse"),
            default='mean')
    parser.add_argument("--registrar", "-r", dest="registrar",
            help="name of registrar to be used during stitching. Options: 'identity': IdentityRegistrar "
            "(place tiles at original position), 'shift': ShiftRegistrar (use cross-correlation "
            "algorithm to correct for suboptimal stage movement), 'global_shift': GlobalShiftRegistrar "
            "(uses cross-correlation algorithm with global optimization)",
            choices=("identity", "shift", "global_shift"), default="global_shift")

    # TODO: --export (spatial) image that defaults to a HFW corresponding to the
    # smallest image, and can be overridden by --hfw xxx (in µm).
    # TODO: --range parameter to select which image to select from the input
    #      (like: 1-4,5,6-10,12)

    options = parser.parse_args(args[1:])

    # Cannot use argparse's built-in 'version' action, because it doesn't support multi-line output
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" +
              "Licensed under the " + odemis.__license__)
        return 0

    infn = options.input
    tifns = options.tiles
    ecfn = options.effcomp
    outfn = options.output

    if not (infn or tifns or ecfn) or not outfn:
        raise ValueError("--input/--tiles/--effcomp and --output arguments must be provided.")

    if sum(bool(o) for o in (infn, tifns, ecfn)) != 1:
        raise ValueError("--input, --tiles, --effcomp cannot be provided simultaneously.")

    if infn:
        data, thumbs = open_acq(infn)
        logging.info("File contains %d %s (and %d %s)",
                     len(data), ngettext("image", "images", len(data)),
                     len(thumbs), ngettext("thumbnail", "thumbnails", len(thumbs)))
    elif tifns:
        registration_method = {"identity": REGISTER_IDENTITY, "shift": REGISTER_SHIFT,
                               "global_shift": REGISTER_GLOBAL_SHIFT}[options.registrar]
        weaving_method = {"collage": WEAVER_COLLAGE, "mean": WEAVER_MEAN,
                  "collage_reverse": WEAVER_COLLAGE_REVERSE}[options.weaver]
        data = stitch(tifns, registration_method, weaving_method)
        thumbs = []
        logging.info("File contains %d %s",
                     len(data), ngettext("stream", "streams", len(data)))
    elif ecfn:
        data = open_ec(ecfn)
        thumbs = []
        logging.info("File contains %d coefficients", data[0].shape[0])

    if options.minus:
        if thumbs:
            logging.info("Dropping thumbnail due to subtraction")
            thumbs = []
        for fn in options.minus:
            sdata, _ = open_acq(fn)
            data = minus(data, sdata)

    save_acq(outfn, data, thumbs, options.pyramid)

    logging.info("Successfully generated file %s", outfn)
Example #9
class TimelapsePlugin(Plugin):
    name = "Timelapse"
    __version__ = "2.2"
    __author__ = u"Éric Piel"
    __license__ = "Public domain"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        ("period", {
            "tooltip": "Time between each acquisition",
            "scale": "log",
        }),
        (
            "numberOfAcquisitions",
            {
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        (
            "semOnlyOnLast",
            {
                "label": "SEM only on the last",
                "tooltip": "Acquire SEM images only once, after the timelapse",
                "control_type": odemis.gui.CONTROL_NONE,  # hidden by default
            }),
        ("filename", {
            "control_type":
            odemis.gui.CONTROL_SAVE_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("expectedDuration", {}),
    ))

    def __init__(self, microscope, main_app):
        super(TimelapsePlugin, self).__init__(microscope, main_app)
        # Can only be used with a microscope
        if not microscope:
            return

        self.period = model.FloatContinuous(10, (1e-3, 10000),
                                            unit="s",
                                            setter=self._setPeriod)
        # TODO: prevent period < acquisition time of all streams
        self.numberOfAcquisitions = model.IntContinuous(100, (2, 100000))
        self.semOnlyOnLast = model.BooleanVA(False)
        self.filename = model.StringVA("a.h5")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)

        self.period.subscribe(self._update_exp_dur)
        self.numberOfAcquisitions.subscribe(self._update_exp_dur)

        # On SECOM/DELPHI, propose to only acquire the SEM at the end
        if microscope.role in ("secom", "delphi", "enzel"):
            self.vaconf["semOnlyOnLast"][
                "control_type"] = odemis.gui.CONTROL_CHECK

        self._dlg = None
        self.addMenu("Acquisition/Timelapse...\tCtrl+T", self.start)

        self._to_store = queue.Queue()  # queue of tuples (str, [DataArray]) for saving data
        self._sthreads = []  # the saving threads
        self._exporter = None  # dataio exporter to use

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension))

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        nb = self.numberOfAcquisitions.value
        p = self.period.value
        ss, last_ss = self._get_acq_streams()

        sacqt = acqmng.estimateTime(ss)
        logging.debug("Estimating %g s acquisition for %d streams", sacqt,
                      len(ss))
        intp = max(0, p - sacqt)

        dur = sacqt * nb + intp * (nb - 1)
        if last_ss:
            dur += acqmng.estimateTime(ss + last_ss) - sacqt

        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(dur), force_write=True)
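        # e.g. (hypothetical numbers): 10 acquisitions of 2 s each with a 5 s period
        # give intp = 3 s of idle time, so dur = 2 * 10 + 3 * 9 = 47 s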

    def _setPeriod(self, period):
        # It should be at least as long as the acquisition time of all the streams
        tot_time = 0
        for s in self._get_acq_streams()[0]:
            acqt = s.estimateAcquisitionTime()
            # Normally we round-up in order to be pessimistic on the duration,
            # but here it's better to be a little optimistic and allow the user
            # to pick a really short period (if each stream has a very short
            # acquisition time).
            acqt = max(1e-3, acqt - Stream.SETUP_OVERHEAD)
            tot_time += acqt

        return min(max(tot_time, period), self.period.range[1])
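        # e.g. (hypothetical): if the streams' summed acquisition time (minus setup
        # overhead) is 1.3 s, requesting a 0.5 s period returns 1.3 s, while
        # requesting 5 s returns 5 s (capped at the period range maximum)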

    def _get_live_streams(self, tab_data):
        """
        Return all the live streams present in the given tab
        """
        ss = list(tab_data.streams.value)

        # On the SPARC, there is a Spot stream, which we don't need for live
        if hasattr(tab_data, "spotStream"):
            try:
                ss.remove(tab_data.spotStream)
            except ValueError:
                pass  # spotStream was not there anyway

        # Filter on a new list: removing items while iterating over ss would skip elements
        return [s for s in ss if not isinstance(s, StaticStream)]

    def _get_acq_streams(self):
        """
        Return the streams that should be used for acquisition
        return:
           acq_st (list of streams): the streams to be acquired at every repetition
           last_st (list of streams): streams to be acquired at the end
        """
        if not self._dlg:
            return [], []

        live_st = (self._dlg.view.getStreams() +
                   self._dlg.hidden_view.getStreams())
        logging.debug("View has %d streams", len(live_st))

        # On the SPARC, the acquisition streams are not the same as the live
        # streams. On the SECOM/DELPHI, they are the same (for now)
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        if hasattr(tab_data, "acquisitionStreams"):
            acq_st = tab_data.acquisitionStreams
            # On ENZEL/METEOR, acquisitionStreams is a ListVA (instead of a set)
            if isinstance(acq_st, model.VigilantAttribute):
                acq_st = acq_st.value

            # Discard the acquisition streams which are not visible
            ss = []
            for acs in acq_st:
                if isinstance(acs, stream.MultipleDetectorStream):
                    if any(subs in live_st for subs in acs.streams):
                        ss.append(acs)
                        break
                elif acs in live_st:
                    ss.append(acs)
        else:
            # No special acquisition streams
            ss = live_st

        last_ss = []
        if self.semOnlyOnLast.value:
            last_ss = [s for s in ss if isinstance(s, stream.EMStream)]
            ss = [s for s in ss if not isinstance(s, stream.EMStream)]

        return ss, last_ss

    def start(self):
        # Fail if the live tab is not selected
        tab = self.main_app.main_data.tab.value
        if tab.name not in ("secom_live", "sparc_acqui",
                            "cryosecom-localization"):
            available_tabs = self.main_app.main_data.tab.choices.values()
            exp_tab_name = "localization" if "cryosecom-localization" in available_tabs else "acquisition"
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "Timelapse acquisition must be done from the %s tab." %
                (exp_tab_name, ), "Timelapse acquisition not possible",
                wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # On SPARC, fail if no ROI selected
        try:
            if tab.tab_data_model.semStream.roi.value == UNDEFINED_ROI:
                box = wx.MessageDialog(
                    self.main_app.main_frame,
                    "You need to select a region of acquisition.",
                    "Timelapse acquisition not possible", wx.OK | wx.ICON_STOP)
                box.ShowModal()
                box.Destroy()
                return
        except AttributeError:
            pass  # Not a SPARC

        # Stop the stream(s) playing to not interfere with the acquisition
        tab.streambar_controller.pauseStreams()

        self.filename.value = self._get_new_filename()
        dlg = AcquisitionDialog(
            self, "Timelapse acquisition",
            "The same streams will be acquired multiple times, defined by the 'number of acquisitions'.\n"
            "The time separating each acquisition is defined by the 'period'.\n"
        )
        self._dlg = dlg
        dlg.addSettings(self, self.vaconf)
        ss = self._get_live_streams(tab.tab_data_model)
        for s in ss:
            if isinstance(s, (ARStream, SpectrumStream, MonochromatorSettingsStream)):
                # TODO: instead of hard-coding the list, a way to detect the type
                # of live image?
                logging.info(
                    "Not showing stream %s, for which the live image is not spatial",
                    s)
                dlg.addStream(s, index=None)
            else:
                dlg.addStream(s)
        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Force to re-check the minimum period time
        self.period.value = self.period.value

        # Update acq time when streams are added/removed
        dlg.view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        dlg.hidden_view.stream_tree.flat.subscribe(self._update_exp_dur,
                                                   init=True)
        # TODO: update the acquisition time whenever a setting changes

        # TODO: disable "acquire" button if no stream selected

        # TODO: also display the repetition and axis settings for the SPARC streams.

        ans = dlg.ShowModal()

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        dlg.view.stream_tree.flat.unsubscribe(self._update_exp_dur)

        dlg.Destroy()

    # Functions to handle the storage of the data in parallel threads

    def _saving_thread(self, i):
        try:
            while True:
                fn, das = self._to_store.get()
                if fn is None:
                    self._to_store.task_done()
                    return
                logging.info("Saving data %s in thread %d", fn, i)
                self._exporter.export(fn, das)
                self._to_store.task_done()
        except Exception:
            logging.exception("Failure in the saving thread")
        finally:
            logging.debug("Saving thread %d done", i)

    def _start_saving_threads(self, n=4):
        """
        n (int >= 1): number of threads
        """
        if self._sthreads:
            logging.warning(
                "The previous saving threads were not stopped, stopping now")
            self._stop_saving_threads()

        for i in range(n):
            t = threading.Thread(target=self._saving_thread, args=(i, ))
            t.start()
            self._sthreads.append(t)

    def _stop_saving_threads(self):
        """
        Blocks until all the data has been stored
        Can be called multiple times in a row
        """
        # Indicate to all the threads that they should stop
        for _ in self._sthreads:
            self._to_store.put((None, None))  # Special "quit" message for each thread

        # Wait for all the threads to complete
        self._to_store.join()
        for t in self._sthreads:
            t.join()
        self._sthreads = []

    def _save_data(self, fn, das):
        """
        Queue the requested DataArrays to be stored in the given file
        """
        self._to_store.put((fn, das))

    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        self._start_saving_threads(4)

        ss, last_ss = self._get_acq_streams()
        sacqt = acqmng.estimateTime(ss)
        p = self.period.value
        nb = self.numberOfAcquisitions.value

        try:
            # If the user just wants to acquire as fast as possible, and there
            # is a single stream, we can use an optimised version
            if (len(ss) == 1 and isinstance(ss[0], LiveStream) and nb >= 2
                    and sacqt < 5 and p < sacqt + Stream.SETUP_OVERHEAD):
                logging.info(
                    "Fast timelapse detected, will acquire as fast as possible"
                )
                self._fast_acquire_one(dlg, ss[0], last_ss)
            else:
                self._acquire_multi(dlg, ss, last_ss)
        finally:
            # Make sure the threads are stopped even in case of error
            self._stop_saving_threads()

        # self.showAcquisition(self.filename.value)

        logging.debug("Closing dialog")
        dlg.Close()

    def _fast_acquire_one(self, dlg, st, last_ss):
        """
        Acquires one stream, *as fast as possible* (ie, the period is not used).
        Only works with LiveStreams (and not with MDStreams)
        st (LiveStream)
        last_ss (list of Streams): all the streams to be acquired at the end
        """
        # Essentially, we trick the stream a little bit, by convincing it that
        # we want a live view, but instead of displaying the data, we store it.
        # It's much faster because we don't have to stop/start the detector between
        # each acquisition.
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        self._acq_completed = threading.Event()

        f = model.ProgressiveFuture()
        f.task_canceller = self._cancel_fast_acquire
        f._stream = st
        if last_ss:
            nb -= 1
            extra_dur = acqmng.estimateTime([st] + last_ss)
        else:
            extra_dur = 0
        self._hijack_live_stream(st, f, nb, fn_pat, extra_dur)

        try:
            # Start acquisition and wait until it's done
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)
            st.is_active.value = True
            self._acq_completed.wait()

            if f.cancelled():
                dlg.resumeSettings()
                return
        finally:
            st.is_active.value = False  # just to be extra sure it's stopped
            logging.debug("Restoring stream %s", st)
            self._restore_live_stream(st)

        # last "normal" acquisition, if needed
        if last_ss:
            logging.debug("Acquiring last acquisition, with all the streams")
            ss = [st] + last_ss
            f.set_progress(end=time.time() + acqmng.estimateTime(ss))
            das, e = acqmng.acquire(
                ss, self.main_app.main_data.settings_obs).result()
            self._save_data(fn_pat % (nb, ), das)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over

    def _cancel_fast_acquire(self, f):
        f._stream.is_active.value = False
        self._acq_completed.set()
        return True

    def _hijack_live_stream(self, st, f, nb, fn_pat, extra_dur=0):
        st._old_shouldUpdateHistogram = st._shouldUpdateHistogram
        st._shouldUpdateHistogram = lambda: None
        self._data_received = 0

        dur_one = st.estimateAcquisitionTime() - Stream.SETUP_OVERHEAD

        # Function that will be called after each new raw data has been received
        def store_raw_data():
            i = self._data_received
            self._data_received += 1
            logging.debug("Received data %d", i)
            if self._data_received == nb:
                logging.debug("Stopping the stream")
                st.is_active.value = False
                self._acq_completed.set()
            elif self._data_received > nb:
                # sometimes it goes too fast, and an extra image is received
                logging.debug("Skipping extra data")
                return

            self._save_data(fn_pat % (i, ), [st.raw[0]])

            # Update progress bar
            left = nb - i
            dur = dur_one * left + extra_dur
            f.set_progress(end=time.time() + dur)

        st._old_shouldUpdateImage = st._shouldUpdateImage
        st._shouldUpdateImage = store_raw_data

    def _restore_live_stream(self, st):
        st._shouldUpdateImage = st._old_shouldUpdateImage
        del st._old_shouldUpdateImage
        st._shouldUpdateHistogram = st._old_shouldUpdateHistogram
        del st._old_shouldUpdateHistogram

    def _acquire_multi(self, dlg, ss, last_ss):
        p = self.period.value
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        sacqt = acqmng.estimateTime(ss)
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p)

        # TODO: if drift correction, use it over all the time

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            if left == 1 and last_ss:
                ss += last_ss
                dur += acqmng.estimateTime(ss) - sacqt

            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acqmng.acquire(
                ss, self.main_app.main_data.settings_obs).result()
            if f.cancelled():
                dlg.resumeSettings()
                return

            self._save_data(fn_pat % (i, ), das)

            # Wait for the requested period, except after the last acquisition
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info(
                        "Immediately starting next acquisition, %g s late",
                        -sleept)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over
Example #11
def ShowAcquisitionFileDialog(parent, filename):
    """
    parent (wxFrame): parent window
    filename (string): full filename to propose by default
    Note: updates the acquisition configuration if the user did pick a new file
    return (string or None): the new filename (or None if the user cancelled)
    """
    conf = get_acqui_conf()

    # Find the available formats (and corresponding extensions)
    formats_to_ext = dataio.get_available_formats()

    # current filename
    path, base = os.path.split(filename)

    # Note: When setting 'defaultFile' when creating the file dialog, the
    #   first filter will automatically be added to the name. Since it
    #   cannot be changed by selecting a different file type, this is a big
    #   no-no. Also, extensions with multiple periods ('.') are not correctly
    #   handled. The solution is to use the SetFilename method instead.
    wildcards, formats = formats_to_wildcards(formats_to_ext)
    dialog = wx.FileDialog(parent,
                           message="Choose a filename and destination",
                           defaultDir=path,
                           defaultFile="",
                           style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
                           wildcard=wildcards)

    # Select the last format used
    prev_fmt = conf.last_format
    try:
        idx = formats.index(conf.last_format)
    except ValueError:
        idx = 0
    dialog.SetFilterIndex(idx)

    # Strip the extension, so that if the user changes the file format,
    # it will not have 2 extensions in a row.
    if base.endswith(conf.last_extension):
        base = base[:-len(conf.last_extension)]
    dialog.SetFilename(base)

    # Show the dialog and check whether it was accepted or cancelled
    if dialog.ShowModal() != wx.ID_OK:
        return None

    # New location and name have been selected...
    # Store the path
    path = dialog.GetDirectory()
    conf.last_path = path

    # Store the format
    fmt = formats[dialog.GetFilterIndex()]
    conf.last_format = fmt

    # Check the filename has a good extension, or add the default one
    fn = dialog.GetFilename()
    ext = None
    for extension in formats_to_ext[fmt]:
        if fn.endswith(extension) and len(extension) > len(ext or ""):
            ext = extension

    if ext is None:
        if fmt == prev_fmt and conf.last_extension in formats_to_ext[fmt]:
            # if the format is the same (and extension is compatible): keep
            # the extension. This avoids changing the extension if it's not
            # the default one.
            ext = conf.last_extension
        else:
            ext = formats_to_ext[fmt][0]  # default extension
        fn += ext

    conf.last_extension = ext

    return os.path.join(path, fn)
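# Usage sketch (hypothetical path and parent window), e.g. from an acquisition panel:
#   fn = ShowAcquisitionFileDialog(main_frame, "/home/user/acq-20240101.ome.tiff")
#   if fn is not None:
#       exporter = dataio.find_fittest_converter(fn)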
Example #12
    def test_get_converter(self):
        fmts = get_available_formats()
        for fmt in fmts:
            fmt_mng = get_converter(fmt)
            self.assertGreaterEqual(len(fmt_mng.EXTENSIONS), 1)
Example #13
class TileAcqPlugin(Plugin):
    name = "Tile acquisition"
    __version__ = "1.7"
    __author__ = u"Éric Piel, Philip Winkler"
    __license__ = "GPLv2"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        (
            "nx",
            {
                "label": "Tiles X",
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        (
            "ny",
            {
                "label": "Tiles Y",
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        ("overlap", {
            "tooltip": "Approximate amount of overlapping area between tiles",
        }),
        ("filename", {
            "tooltip":
            "Pattern of each filename",
            "control_type":
            odemis.gui.CONTROL_SAVE_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("stitch", {
            "tooltip":
            "Use all the tiles to create a large-scale image at the end of the acquisition",
        }),
        ("expectedDuration", {}),
        ("totalArea", {
            "tooltip": "Approximate area covered by all the streams"
        }),
        ("fineAlign", {
            "label": "Fine alignment",
        })))

    def __init__(self, microscope, main_app):
        super(TileAcqPlugin, self).__init__(microscope, main_app)

        self._dlg = None
        self._tab = None  # the acquisition tab
        self.ft = model.InstantaneousFuture()  # acquisition future
        self.microscope = microscope

        # Can only be used with a microscope
        if not microscope:
            return
        else:
            # Check if microscope supports tiling (= has a sample stage)
            main_data = self.main_app.main_data
            if main_data.stage:
                self.addMenu("Acquisition/Tile...\tCtrl+G", self.show_dlg)
            else:
                logging.info(
                    "Tile acquisition not available as no stage present")
                return

        self._ovrl_stream = None  # stream for fine alignment

        self.nx = model.IntContinuous(5, (1, 1000), setter=self._set_nx)
        self.ny = model.IntContinuous(5, (1, 1000), setter=self._set_ny)
        self.overlap = model.FloatContinuous(20, (1, 80), unit="%")
        self.filename = model.StringVA("a.ome.tiff")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)
        self.totalArea = model.TupleVA((1, 1), unit="m", readonly=True)
        self.stitch = model.BooleanVA(True)
        self.fineAlign = model.BooleanVA(False)
        # TODO: manage focus (e.g. autofocus, or ask for manual focus on the corners
        # of the ROI and linearly interpolate)

        self.nx.subscribe(self._update_exp_dur)
        self.ny.subscribe(self._update_exp_dur)
        self.fineAlign.subscribe(self._update_exp_dur)
        self.nx.subscribe(self._update_total_area)
        self.ny.subscribe(self._update_total_area)
        self.overlap.subscribe(self._update_total_area)

        # Warn if memory will be exhausted
        self.nx.subscribe(self._memory_check)
        self.ny.subscribe(self._memory_check)
        self.stitch.subscribe(self._memory_check)

    def _can_fine_align(self, streams):
        """
        Return True if with the given streams it would make sense to fine align
        streams (iterable of Stream)
        return (bool): True if at least a SEM and an optical stream are present
        """
        # check for a SEM stream
        for s in streams:
            if isinstance(s, EMStream):
                break
        else:
            return False

        # check for an optical stream
        # TODO: allow it also for ScannedFluoStream once fine alignment is supported
        # on confocal SECOM.
        for s in streams:
            if isinstance(s, stream.OpticalStream) and not isinstance(
                    s, stream.ScannedFluoStream):
                break
        else:
            return False

        return True

    def _get_visible_streams(self):
        """
        Returns the streams set as visible in the acquisition dialog
        """
        if not self._dlg:
            return []
        ss = self._dlg.view.getStreams() + self._dlg.hidden_view.getStreams()
        logging.debug("View has %d streams", len(ss))
        return ss

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension))

    def _on_streams_change(self, _=None):
        ss = self._get_visible_streams()
        # Subscribe to all relevant setting changes
        for s in ss:
            for va in self._get_settings_vas(s):
                va.subscribe(self._update_exp_dur)
                va.subscribe(self._memory_check)

        # Disable fine alignment if it's not possible
        if self._dlg:
            for entry in self._dlg.setting_controller.entries:
                if hasattr(entry, "vigilattr"):
                    if entry.vigilattr == self.fineAlign:
                        if self._can_fine_align(ss):
                            entry.lbl_ctrl.Enable(True)
                            entry.value_ctrl.Enable(True)
                            self._ovrl_stream = self._create_overlay_stream(ss)
                        else:
                            entry.lbl_ctrl.Enable(False)
                            entry.value_ctrl.Enable(False)
                        break

    def _unsubscribe_vas(self):
        ss = self._get_live_streams()

        # Unsubscribe from all relevant setting changes
        for s in ss:
            for va in self._get_settings_vas(s):
                va.unsubscribe(self._update_exp_dur)
                va.unsubscribe(self._memory_check)

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        tat = self.estimate_time()

        # Typically there are a few more pixels inserted at the beginning of
        # each line for the settle time of the beam. We don't take this into
        # account and so tend to slightly under-estimate.

        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(tat), force_write=True)

    def _update_total_area(self, _=None):
        """
        Called when VA that affects the total area is changed
        """
        # Find the stream with the smallest FoV
        try:
            fov = self._guess_smallest_fov()
        except ValueError as ex:
            logging.debug("Cannot compute total area: %s", ex)
            return

        # * number of tiles - overlap
        nx = self.nx.value
        ny = self.ny.value
        logging.debug("Updating total area based on FoV = %s m x (%d x %d)",
                      fov, nx, ny)
        ta = (fov[0] * (nx - (nx - 1) * self.overlap.value / 100),
              fov[1] * (ny - (ny - 1) * self.overlap.value / 100))

        # Use _set_value as it's read only
        self.totalArea._set_value(ta, force_write=True)
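        # e.g. (hypothetical numbers): a 100 µm FoV with 5 tiles and 20% overlap
        # covers 100e-6 * (5 - 4 * 0.2) = 420 µm along that axis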

    def _set_nx(self, nx):
        """
        Check that stage limit is not exceeded during acquisition of nx tiles.
        It automatically clips the maximum value.
        """
        stage = self.main_app.main_data.stage
        orig_pos = stage.position.value
        tile_size = self._guess_smallest_fov()
        overlap = 1 - self.overlap.value / 100
        tile_pos_x = orig_pos["x"] + self.nx.value * tile_size[0] * overlap

        # The acquisition region only extends to the right and to the bottom, never
        # to the left or above the current position, so it is not required to
        # check the distance to the top and left edges of the stage.
        if hasattr(stage.axes["x"], "range"):
            max_x = stage.axes["x"].range[1]
            if tile_pos_x > max_x:
                nx = max(
                    1, int((max_x - orig_pos["x"]) / (overlap * tile_size[0])))
                logging.info(
                    "Restricting number of tiles in x direction to %i due to stage limit.",
                    nx)
        return nx

    def _set_ny(self, ny):
        """
        Check that stage limit is not exceeded during acquisition of ny tiles.
        It automatically clips the maximum value.
        """
        stage = self.main_app.main_data.stage
        orig_pos = stage.position.value
        tile_size = self._guess_smallest_fov()
        overlap = 1 - self.overlap.value / 100
        tile_pos_y = orig_pos["y"] - self.ny.value * tile_size[1] * overlap

        if hasattr(stage.axes["y"], "range"):
            min_y = stage.axes["y"].range[0]
            if tile_pos_y < min_y:
                ny = max(
                    1,
                    int(-(min_y - orig_pos["y"]) / (overlap * tile_size[1])))
                logging.info(
                    "Restricting number of tiles in y direction to %i due to stage limit.",
                    ny)

        return ny

    def _guess_smallest_fov(self):
        """
        Return (float, float): smallest width and smallest height of all the FoV
          Note: they are not necessarily from the same FoV.
        raise ValueError: If no stream selected
        """
        ss = self._get_live_streams()
        # Filter on a new list: removing items while iterating over ss would skip elements
        ss = [s for s in ss if not isinstance(s, StaticStream)]
        fovs = [self._get_fov(s) for s in ss]
        if not fovs:
            raise ValueError("No stream so no FoV, so no minimum one")

        return (min(f[0] for f in fovs), min(f[1] for f in fovs))

    def show_dlg(self):
        # TODO: if there is a chamber, only allow if there is vacuum

        # Fail if the live tab is not selected
        self._tab = self.main_app.main_data.tab.value
        if self._tab.name not in ("secom_live", "sparc_acqui"):
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "Tiled acquisition must be done from the acquisition tab.",
                "Tiled acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        self._tab.streambar_controller.pauseStreams()

        # If no ROI is selected, select entire area
        try:
            if self._tab.tab_data_model.semStream.roi.value == UNDEFINED_ROI:
                self._tab.tab_data_model.semStream.roi.value = (0, 0, 1, 1)
        except AttributeError:
            pass  # Not a SPARC

        # Disable drift correction (on SPARC)
        if hasattr(self._tab.tab_data_model, "driftCorrector"):
            self._tab.tab_data_model.driftCorrector.roi.value = UNDEFINED_ROI

        ss = self._get_live_streams()
        self.filename.value = self._get_new_filename()

        dlg = AcquisitionDialog(
            self, "Tiled acquisition",
            "Acquire a large area by acquiring the streams multiple "
            "times over a grid.")
        self._dlg = dlg
        # don't allow adding/removing streams
        self._dlg.streambar_controller.to_static_mode()

        dlg.addSettings(self, self.vaconf)
        for s in ss:
            if isinstance(s, (ARStream, SpectrumStream, MonochromatorSettingsStream)):
                # TODO: instead of hard-coding the list, a way to detect the type
                # of live image?
                logging.info(
                    "Not showing stream %s, for which the live image is not spatial",
                    s)
                dlg.addStream(s, index=None)
            else:
                dlg.addStream(s, index=0)

        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Update acq time and area when streams are added/removed. Add stream settings
        # to subscribed vas.
        dlg.view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        dlg.view.stream_tree.flat.subscribe(self._update_total_area, init=True)
        dlg.view.stream_tree.flat.subscribe(self._on_streams_change, init=True)

        # Default fineAlign to True if it's possible
        # Use live streams to make the decision since visible streams might not be initialized yet
        # TODO: the visibility of the streams seems to be reset when the plugin is started,
        # a stream that is invisible in the main panel becomes visible. This should be fixed.
        if self._can_fine_align(ss):
            self.fineAlign.value = True
            self._ovrl_stream = self._create_overlay_stream(ss)

        # This looks tautological, but it actually forces the setter to check
        # that the value is within range, and to reduce it automatically if necessary.
        self.nx.value = self.nx.value
        self.ny.value = self.ny.value
        self._memory_check()

        # TODO: disable "acquire" button if no stream selected.

        ans = dlg.ShowModal()
        if ans == 0 or ans == wx.ID_CANCEL:
            logging.info("Tiled acquisition cancelled")
            self.ft.cancel()
        elif ans == 1:
            logging.info("Tiled acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        # Don't hold references
        self._unsubscribe_vas()
        dlg.Destroy()
        self._dlg = None

    # Blacklist of VA names which are known to not affect the acquisition time
    VAS_NO_ACQUSITION_EFFECT = ("image", "autoBC", "intensityRange",
                                "histogram", "is_active", "should_update",
                                "status", "name", "tint")

    def _create_overlay_stream(self, streams):
        for s in streams:
            if isinstance(s, EMStream):
                em_det = s.detector
                em_emt = s.emitter
            elif isinstance(s, stream.OpticalStream) and not isinstance(
                    s, stream.ScannedFluoStream):
                opt_det = s.detector
        main_data = self.main_app.main_data
        st = stream.OverlayStream("Fine alignment",
                                  opt_det,
                                  em_emt,
                                  em_det,
                                  opm=main_data.opm)
        st.dwellTime.value = main_data.fineAlignDwellTime.value
        return st

    def _get_settings_vas(self, stream):
        """
        Find all the VAs of a stream which can potentially affect the acquisition time
        return (set of VAs)
        """

        nvas = model.getVAs(stream)  # name -> va
        vas = set()
        # remove some VAs known to not affect the acquisition time
        for n, va in nvas.items():
            if n not in self.VAS_NO_ACQUSITION_EFFECT:
                vas.add(va)
        return vas

    def _get_live_streams(self):
        """
        Return all the live streams for tiled acquisition present in the given tab
        """
        tab_data = self._tab.tab_data_model
        ss = list(tab_data.streams.value)

        # On the SPARC, there is a Spot stream, which we don't need for live
        if hasattr(tab_data, "spotStream"):
            try:
                ss.remove(tab_data.spotStream)
            except ValueError:
                pass  # spotStream was not there anyway

        # Discard static streams, which cannot be (re-)acquired.
        # Note: build a new list instead of removing while iterating, which
        # would skip elements.
        ss = [s for s in ss if not isinstance(s, StaticStream)]
        return ss

    def _get_acq_streams(self):
        """
        Return the streams that should be used for acquisition
        all_ss (list of Streams): all acquisition streams possibly including overlay stream
        stitch_ss (list of Streams): acquisition streams to be used for stitching (no overlay stream)
        """
        # On the SPARC, the acquisition streams are not the same as the live
        # streams. On the SECOM/DELPHI, they are the same (for now)
        live_st = self._get_visible_streams()
        tab_data = self._tab.tab_data_model

        if hasattr(tab_data, "acquisitionStreams"):
            acq_st = tab_data.acquisitionStreams
            # Discard the acquisition streams which are not visible
            stitch_ss = []
            for acs in acq_st:
                if (acs in live_st
                        or (isinstance(acs, MultipleDetectorStream)
                            and any(subs in live_st for subs in acs.streams))):
                    stitch_ss.append(acs)
        else:
            # No special acquisition streams
            stitch_ss = live_st[:]

        # Add the overlay stream if requested
        all_ss = stitch_ss[:]
        if self.fineAlign.value and self._can_fine_align(live_st):
            all_ss = stitch_ss + [self._ovrl_stream]
        return all_ss, stitch_ss

    def _generate_scanning_indices(self, rep):
        """
        Generate the explicit X/Y position of each tile, in the scanning order
        rep (int, int): X, Y number of tiles
        return (generator of tuple(int, int)): x/y positions, starting from 0,0
        """
        # For now we do forward/backward on X (fast), and Y (slowly)
        direction = 1
        for iy in range(rep[1]):
            if direction == 1:
                for ix in range(rep[0]):
                    yield (ix, iy)
            else:
                for ix in range(rep[0] - 1, -1, -1):
                    yield (ix, iy)

            direction *= -1
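
    # Illustrative note (not part of the original plugin): for rep = (3, 2) the
    # generator above yields the tiles in serpentine order:
    #   (0, 0), (1, 0), (2, 0)   <- first row, left to right
    #   (2, 1), (1, 1), (0, 1)   <- second row, right to left
    # so the stage only ever moves by one tile between consecutive positions.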

    def _move_to_tile(self, idx, orig_pos, tile_size, prev_idx):
        # Go left/down, with every second line backward:
        # similar to writing/scanning convention, but move of just one unit
        # every time.
        # A-->-->-->--v
        #             |
        # v--<--<--<---
        # |
        # --->-->-->--Z
        overlap = 1 - self.overlap.value / 100
        # don't move on the axis that is not supposed to have changed
        m = {}
        idx_change = numpy.subtract(idx, prev_idx)
        if idx_change[0]:
            m["x"] = orig_pos["x"] + idx[0] * tile_size[0] * overlap
        if idx_change[1]:
            m["y"] = orig_pos["y"] - idx[1] * tile_size[1] * overlap

        logging.debug("Moving to tile %s at %s m", idx, m)
        f = self.main_app.main_data.stage.moveAbs(m)
        try:
            speed = 10e-6  # m/s. Assume very low speed for timeout.
            t = math.hypot(tile_size[0] * overlap,
                           tile_size[1] * overlap) / speed + 1
            # add 1 to make sure it doesn't time out in case of a very small move
            f.result(t)
        except TimeoutError:
            logging.warning("Failed to move to tile %s", idx)
            self.ft.running_subf.cancel()
            # Continue acquiring anyway... maybe it has moved somewhere near
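
    # A minimal sketch (hypothetical helper, not used by the plugin) of the tile
    # position arithmetic above: with a 20 % overlap and a 100 µm-wide tile, the
    # X spacing between neighbouring tiles is 100 µm * (1 - 20 / 100) = 80 µm,
    # with X increasing to the right and Y decreasing (going "down").
    @staticmethod
    def _sketch_tile_position(orig_pos, idx, tile_size, overlap_pct):
        scale = 1 - overlap_pct / 100
        return {"x": orig_pos["x"] + idx[0] * tile_size[0] * scale,
                "y": orig_pos["y"] - idx[1] * tile_size[1] * scale}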

    def _get_fov(self, sd):
        """
        sd (Stream or DataArray): If it's a stream, it must be a live stream,
          and the FoV will be estimated based on the settings.
        return (float, float): width, height in m
        """
        if isinstance(sd, model.DataArray):
            # The actual FoV, as the data recorded it
            return (sd.shape[0] * sd.metadata[model.MD_PIXEL_SIZE][0],
                    sd.shape[1] * sd.metadata[model.MD_PIXEL_SIZE][1])
        elif isinstance(sd, Stream):
            # Estimate the FoV, based on the emitter/detector settings
            if isinstance(sd, SEMStream):
                ebeam = sd.emitter
                return (ebeam.shape[0] * ebeam.pixelSize.value[0],
                        ebeam.shape[1] * ebeam.pixelSize.value[1])

            elif isinstance(sd, CameraStream):
                ccd = sd.detector
                # Look at what metadata the images will get
                md = ccd.getMetadata().copy()
                img.mergeMetadata(
                    md)  # apply correction info from fine alignment

                shape = ccd.shape[0:2]
                pxs = md[model.MD_PIXEL_SIZE]
                # compensate for binning
                binning = ccd.binning.value
                pxs = [p / b for p, b in zip(pxs, binning)]
                return shape[0] * pxs[0], shape[1] * pxs[1]

            elif isinstance(sd, RepetitionStream):
                # CL, Spectrum, AR
                ebeam = sd.emitter
                global_fov = (ebeam.shape[0] * ebeam.pixelSize.value[0],
                              ebeam.shape[1] * ebeam.pixelSize.value[1])
                l, t, r, b = sd.roi.value
                fov = abs(r - l) * global_fov[0], abs(b - t) * global_fov[1]
                return fov
            else:
                raise TypeError("Unsupported Stream %s" % (sd, ))
        else:
            raise TypeError("Unsupported object")

    def _cancel_acquisition(self, future):
        """
        Canceler of acquisition task.
        """
        logging.debug("Canceling acquisition...")

        with future._task_lock:
            if future._task_state == FINISHED:
                return False
            future._task_state = CANCELLED
            future.running_subf.cancel()
            logging.debug("Acquisition cancelled.")
        return True

    STITCH_SPEED = 1e-8  # s/px
    MOVE_SPEED = 1e3  # s/m

    def estimate_time(self, remaining=None):
        """
        Estimates duration for acquisition and stitching.
        """
        ss, stitch_ss = self._get_acq_streams()

        if remaining is None:
            remaining = self.nx.value * self.ny.value
        acqt = acqmng.estimateTime(ss)

        if self.stitch.value:
            # Estimate stitching time based on number of pixels in the overlapping part
            max_pxs = 0
            for s in stitch_ss:
                for sda in s.raw:
                    pxs = sda.shape[0] * sda.shape[1]
                    if pxs > max_pxs:
                        max_pxs = pxs

            stitcht = self.nx.value * self.ny.value * max_pxs * self.overlap.value * self.STITCH_SPEED
        else:
            stitcht = 0

        try:
            movet = max(
                self._guess_smallest_fov()) * self.MOVE_SPEED * (remaining - 1)
            # current tile is part of remaining, so no need to move there
        except ValueError:  # no current streams
            movet = 0.5

        return acqt * remaining + movet + stitcht
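
    # Worked example (illustrative numbers only): a 3x3 grid with 2 s per tile,
    # a largest FoV of 100 µm, a 20 % overlap and 1024x1024 px tiles gives roughly
    #   acquisition: 2 s * 9                                 = 18 s
    #   moves:       100e-6 m * MOVE_SPEED (1e3 s/m) * 8     = 0.8 s
    #   stitching:   9 * 1024 * 1024 px * 20 * STITCH_SPEED  ~ 1.9 s
    # i.e. about 21 s in total.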

    def sort_das(self, das, ss):
        """
        Sorts das based on priority for stitching, i.e. largest SEM da first, then
        other SEM das, and finally das from other streams.
        das: list of DataArrays
        ss: streams from which the das were extracted

        returns: list of DataArrays, reordered input
        """
        # Add the ACQ_TYPE metadata (in case it's not there)
        # In practice, we check the stream the DA came from, and based on the stream
        # type, fill the metadata
        # TODO: make sure acquisition type is added to data arrays before, so this
        # code can be deleted
        for da in das:
            if model.MD_ACQ_TYPE in da.metadata:
                continue
            for s in ss:
                for sda in s.raw:
                    if da is sda:  # Found it!
                        if isinstance(s, EMStream):
                            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_EM
                        elif isinstance(s, ARStream):
                            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_AR
                        elif isinstance(s, SpectrumStream):
                            da.metadata[
                                model.MD_ACQ_TYPE] = model.MD_AT_SPECTRUM
                        elif isinstance(s, FluoStream):
                            da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_FLUO
                        elif isinstance(s, MultipleDetectorStream):
                            if model.MD_OUT_WL in da.metadata:
                                da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_CL
                            else:
                                da.metadata[model.MD_ACQ_TYPE] = model.MD_AT_EM
                        else:
                            logging.warning("Unknown acq stream type for %s",
                                            s)
                        break
                if model.MD_ACQ_TYPE in da.metadata:
                    # if da is found, no need to search other streams
                    break
            else:
                logging.warning("Couldn't find the stream for DA of shape %s",
                                da.shape)

        # Select and order the tiles to be used for stitching
        if self.stitch.value:
            # Remove the DAs we don't want to (cannot) stitch
            das = [da for da in das if da.metadata[model.MD_ACQ_TYPE] \
                   not in (model.MD_AT_AR, model.MD_AT_SPECTRUM)]

            def leader_quality(da):
                """
                return int: The bigger the more leadership
                """
                # For now, we prefer a lot the EM images, because they are usually the
                # one with the smallest FoV and the most contrast
                if da.metadata[model.MD_ACQ_TYPE] == model.MD_AT_EM:
                    return numpy.prod(
                        da.shape)  # More pixel to find the overlap
                elif da.metadata[model.MD_ACQ_TYPE]:
                    # A lot less likely
                    return numpy.prod(da.shape) / 100

            das.sort(key=leader_quality, reverse=True)
            das = tuple(das)
        return das
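
    # Illustrative ordering (not from the original code): with one 2048x2048 EM
    # tile and one 2048x2048 fluo tile, leader_quality() returns ~4.2e6 for the
    # EM data and ~4.2e4 for the fluo data, so the EM tile is used as the
    # reference ("leader") during registration.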

    def _check_fov(self, das, sfov):
        """
        Checks the fov based on the data arrays.
        das: list of DataArrays
        sfov: previous estimate for the FoV
        """
        afovs = [self._get_fov(d) for d in das]
        asfov = (min(f[1] for f in afovs), min(f[0] for f in afovs))
        if not all(util.almost_equal(e, a) for e, a in zip(sfov, asfov)):
            logging.warning("Unexpected min FoV = %s, instead of %s", asfov,
                            sfov)
            sfov = asfov
        return sfov

    def _estimateStreamPixels(self, s):
        """
        return (int): the number of pixels the stream will generate during an
          acquisition
        """
        px = 0
        if isinstance(s, MultipleDetectorStream):
            for st in s.streams:
                # For the EMStream of a SPARC MDStream, it's just one pixel per
                # repetition (except in the case of fuzzing, but let's be optimistic)
                if isinstance(st, (EMStream, CLStream)):
                    px += 1
                else:
                    px += self._estimateStreamPixels(st)

            if hasattr(s, 'repetition'):
                px *= s.repetition.value[0] * s.repetition.value[1]

            return px
        elif isinstance(s, (ARStream, SpectrumStream)):
            # Temporarily reports 0 px, as we don't stitch these streams for now
            return 0

        if hasattr(s, 'emtResolution'):
            px = numpy.prod(s.emtResolution.value)
        elif hasattr(s, 'detResolution'):
            px = numpy.prod(s.detResolution.value)
        elif model.hasVA(s.detector, "resolution"):
            px = numpy.prod(s.detector.resolution.value)
        elif model.hasVA(s.emitter, "resolution"):
            px = numpy.prod(s.emitter.resolution.value)
        else:
            # This shouldn't happen, but let's be optimistic by assuming it'll
            # only acquire one pixel.
            logging.info("Resolution of stream %s cannot be determined.", s)
            px = 1

        return px
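
    # Worked example (illustrative): a SPARC SEM+CL MultipleDetectorStream with a
    # repetition of 200x200 contributes (1 + 1) * 200 * 200 = 80000 px, while an
    # AR or Spectrum stream currently reports 0 px as it is not stitched.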

    MEMPP = 22  # bytes per pixel, found empirically

    @call_in_wx_main
    def _memory_check(self, _=None):
        """
        Makes an estimate for the amount of memory that will be consumed during
        stitching and compares it to the available memory on the computer.
        Displays a warning if memory exceeds available memory.
        """
        if not self._dlg:  # Already destroyed? => no need to care
            return

        if self.stitch.value:
            # Number of pixels for acquisition
            pxs = sum(
                self._estimateStreamPixels(s)
                for s in self._get_acq_streams()[1])
            pxs *= self.nx.value * self.ny.value

            # Memory calculation
            mem_est = pxs * self.MEMPP
            mem_computer = psutil.virtual_memory().total
            logging.debug("Estimating %g GB needed, while %g GB available",
                          mem_est / 1024**3, mem_computer / 1024**3)
            # Assume computer is using 2 GB RAM for odemis and other programs
            mem_sufficient = mem_est < mem_computer - (2 * 1024**3)
        else:
            mem_sufficient = True

        # Display warning
        if mem_sufficient:
            self._dlg.setAcquisitionInfo(None)
        else:
            txt = (
                "Stitching this area requires %.1f GB of memory.\n"
                "Running the acquisition might cause your computer to crash." %
                (mem_est / 1024**3, ))
            self._dlg.setAcquisitionInfo(txt, lvl=logging.ERROR)
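
    # Worked example (illustrative numbers): stitching a 10x10 grid of 2048x2048
    # px tiles gives pxs = 100 * 2048 * 2048 ~ 4.2e8, so mem_est ~ 4.2e8 * 22 B
    # ~ 8.6 GB. On a 16 GB computer (16 - 2 = 14 GB assumed free) this fits; on
    # an 8 GB computer (6 GB assumed free) the warning would be shown.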

    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = self._tab.streambar_controller
        str_ctrl.pauseStreams()
        dlg.pauseSettings()
        self._unsubscribe_vas()

        orig_pos = main_data.stage.position.value
        trep = (self.nx.value, self.ny.value)
        nb = trep[0] * trep[1]
        # It's not a big deal if it was a bad guess as we'll use the actual data
        # before the first move
        sfov = self._guess_smallest_fov()
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        fn_bs, fn_ext = udataio.splitext(fn)

        ss, stitch_ss = self._get_acq_streams()
        end = self.estimate_time() + time.time()

        ft = model.ProgressiveFuture(end=end)
        self.ft = ft  # allows future to be canceled in show_dlg after closing window
        ft.running_subf = model.InstantaneousFuture()
        ft._task_state = RUNNING
        ft._task_lock = threading.Lock()
        ft.task_canceller = self._cancel_acquisition  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # For stitching only
        da_list = []  # for each position, a list of DataArrays
        i = 0
        prev_idx = [0, 0]
        try:
            for ix, iy in self._generate_scanning_indices(trep):
                logging.debug("Acquiring tile %dx%d", ix, iy)
                self._move_to_tile((ix, iy), orig_pos, sfov, prev_idx)
                prev_idx = ix, iy
                # Update the progress bar
                ft.set_progress(end=self.estimate_time(nb - i) + time.time())

                ft.running_subf = acqmng.acquire(
                    ss, self.main_app.main_data.settings_obs)
                das, e = ft.running_subf.result(
                )  # blocks until all the acquisitions are finished
                if e:
                    logging.warning(
                        "Acquisition for tile %dx%d partially failed: %s", ix,
                        iy, e)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                # TODO: do in a separate thread
                fn_tile = "%s-%.5dx%.5d%s" % (fn_bs, ix, iy, fn_ext)
                logging.debug("Will save data of tile %dx%d to %s", ix, iy,
                              fn_tile)
                exporter.export(fn_tile, das)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                if self.stitch.value:
                    # Sort tiles (largest sem on first position)
                    da_list.append(self.sort_das(das, stitch_ss))

                # Check the FoV is correct using the data, and if not update
                if i == 0:
                    sfov = self._check_fov(das, sfov)
                i += 1

            # Move stage to original position
            main_data.stage.moveAbs(orig_pos)

            # Stitch SEM and CL streams
            st_data = []
            if self.stitch.value and (not da_list or not da_list[0]):
                # if only AR or Spectrum are acquired
                logging.warning(
                    "No stream acquired that can be used for stitching.")
            elif self.stitch.value:
                logging.info("Acquisition completed, now stitching...")
                ft.set_progress(end=self.estimate_time(0) + time.time())

                logging.info("Computing big image out of %d images",
                             len(da_list))
                das_registered = stitching.register(da_list)

                # Select weaving method
                # On a Sparc system the mean weaver gives the best result since it
                # smoothes the transitions between tiles. However, using this weaver on the
                # Secom/Delphi generates an image with dark stripes in the overlap regions which are
                # the result of carbon decomposition effects that typically occur in samples imaged
                # by these systems. To mitigate this, we use the
                # collage_reverse weaver that only shows the overlap region of the tile that
                # was imaged first.
                if self.microscope.role in ("secom", "delphi"):
                    weaving_method = WEAVER_COLLAGE_REVERSE
                    logging.info(
                        "Using weaving method WEAVER_COLLAGE_REVERSE.")
                else:
                    weaving_method = WEAVER_MEAN
                    logging.info("Using weaving method WEAVER_MEAN.")

                # Weave every stream
                if isinstance(das_registered[0], tuple):
                    for s in range(len(das_registered[0])):
                        streams = []
                        for da in das_registered:
                            streams.append(da[s])
                        da = stitching.weave(streams, weaving_method)
                        da.metadata[
                            model.MD_DIMS] = "YX"  # TODO: do it in the weaver
                        st_data.append(da)
                else:
                    da = stitching.weave(das_registered, weaving_method)
                    st_data.append(da)

                # Save
                exporter = dataio.find_fittest_converter(fn)
                if exporter.CAN_SAVE_PYRAMID:
                    exporter.export(fn, st_data, pyramid=True)
                else:
                    logging.warning(
                        "File format doesn't support saving image in pyramidal form"
                    )
                    exporter.export(fn, st_data)

            ft.set_result(None)  # Indicate it's over

            # End of the (completed) acquisition
            if ft._task_state == CANCELLED:
                raise CancelledError()
            dlg.Close()

            # Open analysis tab
            if st_data:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display stitched image")
                self.showAcquisition(fn)
            else:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display last tile")
                # It's easier to know the last filename, and it's also the most
                # interesting for the user, as if something went wrong (eg, focus)
                # it's the tile the most likely to show it.
                self.showAcquisition(fn_tile)

            # TODO: also export a full image (based on reported position, or based
            # on alignment detection)
        except CancelledError:
            logging.debug("Acquisition cancelled")
            dlg.resumeSettings()
        except Exception as ex:
            logging.exception("Acquisition failed.")
            ft.running_subf.cancel()
            ft.set_result(None)
            # Show also in the window. It will be hidden next time a setting is changed.
            self._dlg.setAcquisitionInfo("Acquisition failed: %s" % (ex, ),
                                         lvl=logging.ERROR)
        finally:
            logging.info("Tiled acquisition ended")
            main_data.stage.moveAbs(orig_pos)
Example #14
class RGBCLIntensity(Plugin):
    name = "RGB CL-intensity"
    __version__ = "1.2"
    __author__ = u"Toon Coenen & Éric Piel"
    __license__ = "GNU General Public License 2"

    vaconf = OrderedDict((
        ("filter1", {
            "label": "Blue",
            "choices": util.format_band_choices,
        }),
        ("filter2", {
            "label": "Green",
            "choices": util.format_band_choices,
        }),
        ("filter3", {
            "label": "Red",
            "choices": util.format_band_choices,
        }),
        ("filename", {
            "control_type": odemis.gui.CONTROL_SAVE_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("expectedDuration", {
        }),
    ))

    def __init__(self, microscope, main_app):
        super(RGBCLIntensity, self).__init__(microscope, main_app)

        # Can only be used on a SPARC with a CL-intensity detector
        if not microscope:
            return
        try:
            self.ebeam = model.getComponent(role="e-beam")
            self.cldetector = model.getComponent(role="cl-detector")
            self.filterwheel = model.getComponent(role="cl-filter")
            self.sed = model.getComponent(role="se-detector")
            # We could also check the filter wheel has at least 3 filters, but
            # let's not be too picky, if the user has installed the plugin, he
            # probably wants to use it anyway.
        except LookupError:
            logging.info("Hardware not found, cannot use the RGB CL plugin")
            return

        # The SEM survey and CLi stream (will be updated when showing the window)
        self._survey_s = None
        self._cl_int_s = None
        self._acqui_tab = main_app.main_data.getTabByName("sparc_acqui").tab_data_model

        # The settings to be displayed in the dialog
        # TODO: pick better default filters than first 3 filters
        # => based on the wavelengths fitting best RGB, or the names (eg, "Blue"),
        # and avoid "pass-through".
        fbchoices = self.filterwheel.axes["band"].choices
        if isinstance(fbchoices, dict):
            fbvalues = sorted(fbchoices.keys())
        else:
            fbvalues = fbchoices
        # FloatEnumerated because filter positions can be in rad (ie, not int positions)
        self.filter1 = model.FloatEnumerated(fbvalues[0],
                                             choices=fbchoices)
        self.filter2 = model.FloatEnumerated(fbvalues[min(1, len(fbvalues) - 1)],
                                             choices=fbchoices)
        self.filter3 = model.FloatEnumerated(fbvalues[min(2, len(fbvalues) - 1)],
                                             choices=fbchoices)

        self._filters = [self.filter1, self.filter2, self.filter3]
        self._colours = [(0, 0, 255), (0, 255, 0), (255, 0, 0)]  # B, G, R

        self.filename = model.StringVA("a.tiff")
        self.expectedDuration = model.VigilantAttribute(1, unit="s", readonly=True)

        self.addMenu("Acquisition/RGB CL intensity...", self.start)

    def _read_config(self):
        """
        Updates the filter values based on the content of the config file
        It will not fail (if there is no config file, or the config file is incorrect).
        In the worst case, it will not update the filter values.
        """
        try:
            config = configparser.SafeConfigParser()  # Note: in Python 3, this is now also just called "ConfigParser"
            config.read(CONF_FILE)  # Returns empty config if no file
            for fname, va in zip(("blue", "green", "red"), self._filters):
                fval = config.getfloat("filters", fname)
                # Pick the same/closest value if it's available in the choices, always returns something valid
                va.value = odemis.util.find_closest(fval, va.choices)
                logging.debug("Updated %s to %s (from config %s)", fname, va.value, fval)

        except (configparser.NoOptionError, configparser.NoSectionError) as ex:
            logging.info("Config file is not existing or complete, no restoring filter values: %s", ex)
        except Exception:
            logging.exception("Failed to open the config file")

    def _write_config(self):
        """
        Store the filter values into the config file
        """
        try:
            config = configparser.SafeConfigParser()
            config.add_section("filters")
            config.set("filters", "blue", "%f" % self.filter1.value)
            config.set("filters", "green", "%f" % self.filter2.value)
            config.set("filters", "red", "%f" % self.filter3.value)

            with open(CONF_FILE, "w") as configfile:
                config.write(configfile)
        except Exception:
            logging.exception("Failed to save the config file")
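
    # The config written above is a plain INI file at CONF_FILE, for instance
    # (hypothetical values, filter positions in rad):
    #   [filters]
    #   blue = 1.570796
    #   green = 3.141593
    #   red = 4.712389
    # _read_config() later snaps each stored value back to the closest available
    # filter-wheel position.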

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        at = self.estimateAcquisitionTime()

        # Use _set_value as it's read only
        self.expectedDuration._set_value(round(at), force_write=True)

    def _calc_acq_times(self):
        """
        Calculate exposure times for different elements of the acquisition.
        return (3 float): in s
        """
        dt_survey = 0
        dt_cl = 0
        dt_drift = 0

        if self._survey_s:
            dt_survey = self._survey_s.estimateAcquisitionTime()

        if self._cl_int_s:
            dt_cl = self._cl_int_s.estimateAcquisitionTime()

        # For each CL filter acquisition, the drift correction will run once
        # (*in addition* to the standard in-frame drift correction)
        dc = self._acqui_tab.driftCorrector
        if dc.roi.value != UNDEFINED_ROI:
            drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                    dc.roi.value, dc.dwellTime.value)
            dt_drift = drift_est.estimateAcquisitionTime() + 0.1

        return dt_survey, dt_cl, dt_drift

    def estimateAcquisitionTime(self):
        """
        Estimate the time it will take for the measurement.
        The number of pixels still has to be defined in the stream part
        """
        dt_survey, dt_cl, dt_drift = self._calc_acq_times()
        return dt_survey + len(self._filters) * (dt_cl + dt_drift)
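
    # Worked example (illustrative): with a 5 s survey, a 30 s CL-intensity
    # acquisition per filter and 1 s of extra drift correction per filter, the
    # estimate is 5 + 3 * (30 + 1) = 98 s.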

    def _get_new_filename(self):
        conf = get_acqui_conf()
        # Use TIFF by default, as it's a little bit more user-friendly for simple
        # coloured images.
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), ".tiff")
        )

    def _get_sem_survey(self):
        """
        Finds the SEM survey stream in the acquisition tab
        return (SEMStream or None): None if not found
        """
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            if isinstance(s, stream.SEMStream):
                return s

        logging.warning("No SEM survey stream found")
        return None

    def _get_cl_intensity(self):
        """
        Finds the CL intensity acquisition (aka MD) stream in the acquisition tab
        return (MultipleDetectorStream or None): None if not found
        """
        tab_data = self.main_app.main_data.tab.value.tab_data_model

        # Look for the MultiDetector stream which contains a CL intensity stream
        for mds in tab_data.acquisitionStreams:
            if not isinstance(mds, stream.MultipleDetectorStream):
                continue
            for ss in mds.streams:
                if isinstance(ss, stream.CLSettingsStream):
                    return mds

        logging.warning("No CL intensity stream found")
        return None

    def _pause_streams(self):
        """
        return (list of streams): the streams paused
        """
        try:
            str_ctrl = self.main_app.main_data.tab.value.streambar_controller
        except AttributeError:  # Odemis v2.6 and earlier versions
            str_ctrl = self.main_app.main_data.tab.value.stream_controller
        return str_ctrl.pauseStreams()

    def start(self):
        # Check the acquisition tab is open, and a CL-intensity stream is available
        ct = self.main_app.main_data.tab.value
        if ct.name == "sparc_acqui":
            cls = self._get_cl_intensity()
        else:
            cls = None
        if not cls:
            logging.info("Failed to start RGB CL intensity stream")
            dlg = wx.MessageDialog(self.main_app.main_frame,
                                   "No CL-intensity stream is currently open.\n"
                                   "You need to open a CL intensity stream "
                                   "and set the acquisition parameters.\n",
                                   caption="RGB CL intensity",
                                   style=wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
            return

        # Normally, since Odemis v3.1, all CLSettingsStreams on systems with a cl-filter
        # have a "local axis" as a VA "axisFilter".
        assert any(hasattr(s, "axisFilter") for s in cls.streams)

        self._pause_streams()

        self._read_config()  # Restore filter values from the config file

        # immediately switch optical path, to save time
        self.main_app.main_data.opm.setPath(cls)  # non-blocking

        # Get survey stream too
        self._survey_s = self._get_sem_survey()
        self._cl_int_s = cls

        self._update_exp_dur()

        # Create a window
        dlg = AcquisitionDialog(self, "RGB CL intensity acquisition",
                                "Acquires a RGB CL-intensity image\n"
                                "Specify the relevant settings and start the acquisition\n"
                                )

        self.filename.value = self._get_new_filename()
        dlg.addSettings(self, conf=self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Show the window, and wait until the acquisition is over
        ans = dlg.ShowModal()

        # The window is closed
        if ans == 0:
            logging.debug("RGB CL intensity acquisition cancelled")
        elif ans == 1:
            logging.debug("RGB CL intensity acquisition completed")
        else:
            logging.warning("Unknown return code %d", ans)

        self._write_config()  # Store the filter values to restore them on next time

        # Make sure we don't hold reference to the streams forever
        self._survey_s = None
        self._cl_int_s = None

        dlg.Destroy()

    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        self._pause_streams()

        # We use the acquisition CL intensity stream, so there is a concurrent
        # SEM acquisition (in addition to the survey). The drift correction is
        # run both during each acquisition and in-between acquisitions. The
        # drift between acquisitions is corrected by updating the metadata, so
        # it's a kind of post-processing compensation. The advantage is that it
        # doesn't affect the data, and if the entire field of view is imaged it
        # still works properly; however, when opening the data in other software
        # (eg, ImageJ), that compensation will not be applied automatically.
        # Alternatively, the images could be cropped to just the region common
        # to all the acquisitions, but that might lose data.
        # Note: the compensation could also be done by updating the ROI of the
        # CL stream. However, in the most common case the user acquires the
        # entire area, so that kind of drift compensation cannot be applied. We
        # could also use the SEM concurrent stream and measure the drift
        # afterwards, but that would double the dwell time.
        dt_survey, dt_clint, dt_drift = self._calc_acq_times()
        cl_set_s = next(s for s in self._cl_int_s.streams if hasattr(s, "axisFilter"))

        das = []
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Prepare the Future to represent the acquisition progress, and cancel
        dur = self.expectedDuration.value
        end = time.time() + dur
        ft = model.ProgressiveFuture(end=end)

        # Allow to cancel by cancelling also the sub-task
        def canceller(future):
            # To be absolutely correct, there should be a lock; however, in
            # practice, in the worst case the task will just run a little longer
            # before stopping.
            if future._subf:
                logging.debug("Cancelling sub future %s", future._subf)
                return future._subf.cancel()

        ft._subf = None  # sub-future corresponding to the task currently happening
        ft.task_canceller = canceller  # To allow cancelling while it's running

        # Indicate the work is starting now
        ft.set_running_or_notify_cancel()
        dlg.showProgress(ft)

        try:
            # acquisition of SEM survey
            if self._survey_s:
                ft._subf = acqmng.acquire([self._survey_s], self.main_app.main_data.settings_obs)
                d, e = ft._subf.result()
                das.extend(d)
                if e:
                    raise e

            if ft.cancelled():
                raise CancelledError()

            dur -= dt_survey
            ft.set_progress(end=time.time() + dur)

            # Extra drift correction between each filter
            dc_roi = self._acqui_tab.driftCorrector.roi.value
            dc_dt = self._acqui_tab.driftCorrector.dwellTime.value

            # drift correction vector
            tot_dc_vect = (0, 0)
            if dc_roi != UNDEFINED_ROI:
                drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                                    dc_roi, dc_dt)
                drift_est.acquire()
                dur -= dt_drift
                ft.set_progress(end=time.time() + dur)
            else:
                drift_est = None

            # Loop over the filters, for now it's fixed to 3 but this could be flexible
            for fb, co in zip(self._filters, self._colours):
                cl_set_s.axisFilter.value = fb.value
                logging.debug("Using band %s", fb.value)
                ft.set_progress(end=time.time() + dur)

                # acquire CL stream
                ft._subf = acqmng.acquire([self._cl_int_s], self.main_app.main_data.settings_obs)
                d, e = ft._subf.result()
                if e:
                    raise e
                if ft.cancelled():
                    raise CancelledError()
                dur -= dt_clint
                ft.set_progress(end=time.time() + dur)

                if drift_est:
                    drift_est.acquire()
                    dc_vect = drift_est.estimate()
                    pxs = self.ebeam.pixelSize.value
                    tot_dc_vect = (tot_dc_vect[0] + dc_vect[0] * pxs[0],
                                   tot_dc_vect[1] - dc_vect[1] * pxs[1])  # Y is inverted in physical coordinates
                    dur -= dt_drift
                    ft.set_progress(end=time.time() + dur)
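
                # Illustrative numbers: if the estimator reports a drift of
                # (2, -3) e-beam pixels with a 5 nm pixel size, the accumulated
                # correction vector grows by (+10 nm, +15 nm); it is then added
                # to the MD_POS of the acquired images below. Y is negated
                # because the physical Y axis is inverted w.r.t. the image one.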

                # Convert the CL intensity stream into a "fluo" stream so that it's nicely displayed (in colour) in the viewer
                for da in d:
                    # Update the center position based on drift
                    pos = da.metadata[model.MD_POS]
                    logging.debug("Correcting position for drift by %s m", tot_dc_vect)
                    pos = tuple(p + dc for p, dc in zip(pos, tot_dc_vect))
                    da.metadata[model.MD_POS] = pos

                    if model.MD_OUT_WL not in da.metadata:
                        # check it's not the SEM concurrent stream
                        continue
                    # Force the colour, which forces it to be a FluoStream when
                    # opening it in the analysis tab, for nice colour merging.
                    da.metadata[model.MD_USER_TINT] = co

                das.extend(d)
                if ft.cancelled():
                    raise CancelledError()

            ft.set_result(None)  # Indicate it's over

        except CancelledError as ex:
            logging.debug("Acquisition cancelled")
            return
        except Exception as ex:
            logging.exception("Failure during RGB CL acquisition")
            ft.set_exception(ex)
            # TODO: show the error in the plugin window
            return

        if ft.cancelled() or not das:
            return

        logging.debug("Will save data to %s", fn)
        exporter.export(fn, das)
        self.showAcquisition(fn)
        dlg.Close()
Example #15
def ShowAcquisitionFileDialog(parent, filename):
    """
    parent (wxFrame): parent window
    filename (string): full filename to propose by default
    Note: updates the acquisition configuration if the user did pick a new file
    return (string or None): the new filename (or the None if the user cancelled)
    """
    conf = get_acqui_conf()

    # Find the available formats (and corresponding extensions)
    formats_to_ext = dataio.get_available_formats()

    # current filename
    path, base = os.path.split(filename)

    # Note: When setting 'defaultFile' when creating the file dialog, the
    #   first filter will automatically be added to the name. Since it
    #   cannot be changed by selecting a different file type, this is a big
    #   no-no. Also, extensions with multiple periods ('.') are not correctly
    #   handled. The solution is to use the SetFilename method instead.
    wildcards, formats = formats_to_wildcards(formats_to_ext)
    dialog = wx.FileDialog(parent,
                           message="Choose a filename and destination",
                           defaultDir=path,
                           defaultFile="",
                           style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
                           wildcard=wildcards)

    # Select the last format used
    prev_fmt = conf.last_format
    try:
        idx = formats.index(conf.last_format)
    except ValueError:
        idx = 0
    dialog.SetFilterIndex(idx)

    # Strip the extension, so that if the user changes the file format,
    # it will not have 2 extensions in a row.
    if base.endswith(conf.last_extension):
        base = base[:-len(conf.last_extension)]
    dialog.SetFilename(base)

    # Show the dialog and check whether it was accepted or cancelled
    if dialog.ShowModal() != wx.ID_OK:
        return None

    # New location and name have been selected...
    # Store the path
    path = dialog.GetDirectory()
    conf.last_path = path

    # Store the format
    fmt = formats[dialog.GetFilterIndex()]
    conf.last_format = fmt

    # Check the filename has a good extension, or add the default one
    fn = dialog.GetFilename()
    ext = None
    for extension in formats_to_ext[fmt]:
        if fn.endswith(extension) and len(extension) > len(ext or ""):
            ext = extension

    if ext is None:
        if fmt == prev_fmt and conf.last_extension in formats_to_ext[fmt]:
            # if the format is the same (and extension is compatible): keep
            # the extension. This avoids changing the extension if it's not
            # the default one.
            ext = conf.last_extension
        else:
            ext = formats_to_ext[fmt][0] # default extension
        fn += ext

    conf.last_extension = ext

    return os.path.join(path, fn)
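
# A minimal usage sketch (hypothetical caller, not part of the original code):
# propose a default name and, if the user confirms, export the data with the
# converter matching the chosen format/extension.
def _example_save_with_dialog(parent, data, default_fn):
    fn = ShowAcquisitionFileDialog(parent, default_fn)
    if fn is None:  # the user cancelled the dialog
        return None
    exporter = dataio.find_fittest_converter(fn)
    exporter.export(fn, data)
    return fn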
Example #16
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    # arguments handling
    parser = argparse.ArgumentParser(description="File format conversion utility")

    parser.add_argument('--version', dest="version", action='store_true',
                        help="show program's version number and exit")
    parser.add_argument("--input", "-i", dest="input",
                        help="name of the input file")
    parser.add_argument("--effcomp", dest="effcomp",
                        help="name of a spectrum efficiency compensation table (in CSV format)")
    fmts = dataio.get_available_formats(os.O_WRONLY)
    parser.add_argument("--output", "-o", dest="output",
            help="name of the output file. "
            "The file format is derived from the extension (%s are supported)." %
            (" and ".join(fmts)))

    parser.add_argument("--minus", "-m", dest="minus", action='append',
            help="name of an acquisition file whose data is subtracted from the input file.")

    # TODO: --export (spatial) image that defaults to a HFW corresponding to the
    # smallest image, and can be overridden by --hfw xxx (in µm).
    # TODO: --range parameter to select which image to select from the input
    #      (like: 1-4,5,6-10,12)

    options = parser.parse_args(args[1:])

    # Cannot use the internal feature, because it doesn't support multi-line
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" +
              "Licensed under the " + odemis.__license__)
        return 0

    infn = options.input
    ecfn = options.effcomp
    outfn = options.output

    if not (infn or ecfn) or not outfn:
        raise ValueError("--input/--effcomp and --output arguments must be provided.")

    if infn and ecfn:
        raise ValueError("--input and --effcomp should not be provided simultaneously.")

    if infn:
        data, thumbs = open_acq(infn)
        logging.info("File contains %d %s (and %d %s)",
                     len(data), ngettext("image", "images", len(data)),
                     len(thumbs), ngettext("thumbnail", "thumbnails", len(thumbs)))
    elif ecfn:
        data = open_ec(ecfn)
        thumbs = []
        logging.info("File contains %d coefficients", data[0].shape[0])

    if options.minus:
        if thumbs:
            logging.info("Dropping thumbnail due to subtraction")
            thumbs = []
        for fn in options.minus:
            sdata, sthumbs = open_acq(fn)
            data = minus(data, sdata)

    save_acq(outfn, data, thumbs)

    logging.info("Successfully generated file %s", outfn)
Example #17
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    # arguments handling
    parser = argparse.ArgumentParser(
        description="File format conversion utility")

    parser.add_argument('--version',
                        dest="version",
                        action='store_true',
                        help="show program's version number and exit")
    parser.add_argument("--input",
                        "-i",
                        dest="input",
                        help="name of the input file")
    parser.add_argument(
        "--effcomp",
        dest="effcomp",
        help="name of a spectrum efficiency compensation table (in CSV format)"
    )
    fmts = dataio.get_available_formats(os.O_WRONLY)
    parser.add_argument(
        "--output",
        "-o",
        dest="output",
        help="name of the output file. "
        "The file format is derived from the extension (%s are supported)." %
        (" and ".join(fmts)))

    parser.add_argument(
        "--minus",
        "-m",
        dest="minus",
        action='append',
        help=
        "name of an acquisition file whose data is subtracted from the input file."
    )

    # TODO: --range parameter to select which image to select from the input
    #      (like: 1-4,5,6-10,12)

    options = parser.parse_args(args[1:])

    # Cannot use the internal feature, because it doesn't support multi-line
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" + "Licensed under the " +
              odemis.__license__)
        return 0

    infn = options.input
    ecfn = options.effcomp
    outfn = options.output

    if not (infn or ecfn) or not outfn:
        raise ValueError(
            "--input/--effcomp and --output arguments must be provided.")

    if infn and ecfn:
        raise ValueError(
            "--input and --effcomp should not be provided simultaneously.")

    if infn:
        data, thumbs = open_acq(infn)
        logging.info("File contains %d %s (and %d %s)", len(data),
                     ngettext("image", "images", len(data)), len(thumbs),
                     ngettext("thumbnail", "thumbnails", len(thumbs)))
    elif ecfn:
        data = open_ec(ecfn)
        thumbs = []
        logging.info("File contains %d coefficients", data[0].shape[0])

    if options.minus:
        if thumbs:
            logging.info("Dropping thumbnail due to subtraction")
            thumbs = []
        for fn in options.minus:
            sdata, sthumbs = open_acq(fn)
            data = minus(data, sdata)

    save_acq(outfn, data, thumbs)

    logging.info("Successfully generated file %s", outfn)
Example #18
class ZStackPlugin(Plugin):
    name = "Z Stack"
    __version__ = "1.3"
    __author__ = u"Anders Muskens"
    __license__ = "GPLv2"

    # Describe how the values should be displayed
    # See odemis.gui.conf.data for all the possibilities
    vaconf = OrderedDict((
        (
            "numberOfAcquisitions",
            {
                "control_type": odemis.gui.CONTROL_INT,  # no slider
            }),
        ("filename", {
            "control_type":
            odemis.gui.CONTROL_SAVE_FILE,
            "wildcard":
            formats_to_wildcards(get_available_formats(os.O_WRONLY))[0],
        }),
        ("zstep", {
            "control_type": odemis.gui.CONTROL_FLT,
        }),
        ("zstart", {
            "control_type": odemis.gui.CONTROL_FLT,
        }),
    ))

    def __init__(self, microscope, main_app):
        super(ZStackPlugin, self).__init__(microscope, main_app)
        # Can only be used with a microscope
        main_data = self.main_app.main_data

        if not microscope or main_data.focus is None:
            return

        self.focus = main_data.focus
        self._zrange = self.focus.axes['z'].range
        zunit = self.focus.axes['z'].unit
        self._old_pos = self.focus.position.value
        z = max(self._zrange[0], min(self._old_pos['z'], self._zrange[1]))
        self.zstart = model.FloatContinuous(z, range=self._zrange, unit=zunit)
        self.zstep = model.FloatContinuous(1e-6,
                                           range=(-1e-5, 1e-5),
                                           unit=zunit,
                                           setter=self._setZStep)
        self.numberofAcquisitions = model.IntContinuous(
            3, (2, 999), setter=self._setNumberOfAcquisitions)

        self.filename = model.StringVA("a.h5")
        self.expectedDuration = model.VigilantAttribute(1,
                                                        unit="s",
                                                        readonly=True)

        self.zstep.subscribe(self._update_exp_dur)
        self.numberofAcquisitions.subscribe(self._update_exp_dur)

        self._acq_streams = None  # previously folded streams, for optimisation
        self._dlg = None
        self.addMenu("Acquisition/ZStack...\tCtrl+B", self.start)

    def _acqRangeIsValid(self, acq_range):
        return self._zrange[0] <= acq_range <= self._zrange[1]

    def _setZStep(self, zstep):
        # Check if the acquisition will be within the range of the actuator
        acq_range = self.zstart.value + zstep * self.numberofAcquisitions.value
        if self._acqRangeIsValid(acq_range):
            return zstep
        else:
            return self.zstep.value  # Old value

    def _setNumberOfAcquisitions(self, n_acq):
        # Check if the acquisition will be within the range of the actuator
        acq_range = self.zstart.value + self.zstep.value * n_acq
        if self._acqRangeIsValid(acq_range):
            return n_acq
        else:
            return self.numberofAcquisitions.value  # Old value
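
    # Illustrative check (not from the original code): with zstart = 10 µm,
    # zstep = 1 µm and 30 acquisitions, the final focus position would be
    # 10 µm + 1 µm * 30 = 40 µm. If the focus range ends at 35 µm, the setter
    # rejects the new value and simply returns the old one.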

    def _get_new_filename(self):
        conf = get_acqui_conf()
        return os.path.join(
            conf.last_path,
            u"%s%s" % (time.strftime("%Y%m%d-%H%M%S"), conf.last_extension))

    def _estimate_step_duration(self):
        """
        return (float > 0): estimated time (in s) that it takes to move the focus
          by one step.
        """
        speed = None
        if model.hasVA(self.focus, "speed"):
            speed = self.focus.speed.value.get('z', None)
        if speed is None:
            speed = 10e-6  # m/s, pessimistic

        return driver.estimateMoveDuration(abs(self.zstep.value), speed, 0.01)

    def _update_exp_dur(self, _=None):
        """
        Called when VA that affects the expected duration is changed
        """
        nsteps = self.numberofAcquisitions.value
        step_time = self._estimate_step_duration()
        ss = self._get_acq_streams()

        sacqt = acqmng.estimateTime(ss)
        logging.debug("Estimating %g s acquisition for %d streams", sacqt,
                      len(ss))

        dur = sacqt * nsteps + step_time * (nsteps - 1)
        # Use _set_value as it's read only
        self.expectedDuration._set_value(math.ceil(dur), force_write=True)

    def _get_live_streams(self, tab_data):
        """
        Return all the live streams present in the given tab
        """
        ss = list(tab_data.streams.value)

        # On the SPARC, there is a Spot stream, which we don't need for live
        if hasattr(tab_data, "spotStream"):
            try:
                ss.remove(tab_data.spotStream)
            except ValueError:
                pass  # spotStream was not there anyway

        # Discard static streams, which cannot be (re-)acquired.
        # Note: build a new list instead of removing while iterating, which
        # would skip elements.
        ss = [s for s in ss if not isinstance(s, StaticStream)]
        return ss

    def _get_acq_streams(self):
        """
        Return the streams that should be used for acquisition
        return:
           acq_st (list of streams): the streams to be acquired at every repetition
        """
        if not self._dlg:
            return []

        live_st = (self._dlg.view.getStreams() +
                   self._dlg.hidden_view.getStreams())
        logging.debug("View has %d streams", len(live_st))

        # On the SPARC, the acquisition streams are not the same as the live
        # streams. On the SECOM/DELPHI, they are the same (for now)
        tab_data = self.main_app.main_data.tab.value.tab_data_model
        if hasattr(tab_data, "acquisitionStreams"):
            acq_st = tab_data.acquisitionStreams
            # Discard the acquisition streams which are not visible
            ss = []
            for acs in acq_st:
                if isinstance(acs, stream.MultipleDetectorStream):
                    if any(subs in live_st for subs in acs.streams):
                        ss.append(acs)
                        break
                elif acs in live_st:
                    ss.append(acs)
        else:
            # No special acquisition streams
            ss = live_st

        self._acq_streams = acqmng.foldStreams(ss, self._acq_streams)
        return self._acq_streams

    def _on_focus_pos(self, pos):
        # Do not listen to zstart when we change it, to make sure there is no loop
        self.zstart.unsubscribe(self._on_zstart)
        self.zstart.value = pos["z"]
        self.zstart.subscribe(self._on_zstart)

    def _on_zstart(self, zpos):
        self.focus.moveAbs({"z": zpos})
        # Don't wait for it to finish moving, eventually it will update the
        # focus position... and will set the zstart value

    def start(self):
        # Fail if the live tab is not selected
        tab = self.main_app.main_data.tab.value
        if tab.name not in ("secom_live", "sparc_acqui"):
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "ZStack acquisition must be done from the acquisition stream.",
                "ZStack acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # On SPARC, fail if no ROI selected
        if hasattr(tab.tab_data_model,
                   "roa") and tab.tab_data_model.roa.value == UNDEFINED_ROI:
            box = wx.MessageDialog(
                self.main_app.main_frame,
                "You need to select a region of acquisition.",
                "Z stack acquisition not possible", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        # Stop the stream(s) playing to not interfere with the acquisition
        tab.streambar_controller.pauseStreams()

        self.filename.value = self._get_new_filename()
        dlg = AcquisitionDialog(
            self, "Z Stack acquisition",
            "The same streams will be acquired multiple times at different Z positions, defined starting from Z start, with a step size.\n"
        )
        self._dlg = dlg
        dlg.addSettings(self, self.vaconf)
        ss = self._get_live_streams(tab.tab_data_model)
        for s in ss:
            if isinstance(
                    s,
                (ARStream, SpectrumStream, MonochromatorSettingsStream)):
                # TODO: instead of hard-coding the list, a way to detect the type
                # of live image?
                logging.info(
                    "Not showing stream %s, for which the live image is not spatial",
                    s)
                dlg.addStream(s, index=None)
            else:
                dlg.addStream(s)
        dlg.addButton("Cancel")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')

        # Connect zstart with the actual focus position
        self.zstart.subscribe(self._on_zstart)
        self.focus.position.subscribe(self._on_focus_pos, init=True)

        # Update acq time when streams are added/removed
        dlg.view.stream_tree.flat.subscribe(self._update_exp_dur, init=True)
        dlg.hidden_view.stream_tree.flat.subscribe(self._update_exp_dur,
                                                   init=True)
        # TODO: update the acquisition time whenever a setting changes

        # TODO: disable "acquire" button if no stream selected

        # TODO: also display the repetition and axis settings for the SPARC streams.

        ans = dlg.ShowModal()

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        self.focus.position.unsubscribe(self._on_focus_pos)
        self.zstart.unsubscribe(self._on_zstart)

        # Don't hold references
        self._acq_streams = None
        if dlg:  # If dlg hasn't been destroyed yet
            dlg.Destroy()

    def constructCube(self, images):
        # images is a list of 3 dim data arrays.
        ret = []
        for image in images:
            stack = numpy.dstack(image)
            stack = numpy.swapaxes(stack, 1, 2)
            ret.append(stack[0])

        # Add back metadata
        metadata3d = copy.copy(images[0].metadata)
        # Extend pixel size to 3D
        ps_x, ps_y = metadata3d[model.MD_PIXEL_SIZE]
        ps_z = self.zstep.value

        # Compute the cube centre
        c_x, c_y = metadata3d[model.MD_POS]
        c_z = self.zstart.value + (self.zstep.value *
                                   self.numberofAcquisitions.value) / 2
        metadata3d[model.MD_POS] = (c_x, c_y, c_z)

        # For a negative pixel size, convert to a positive and flip the z axis
        if ps_z < 0:
            ret = numpy.flipud(ret)
            ps_z = -ps_z

        metadata3d[model.MD_PIXEL_SIZE] = (ps_x, ps_y, abs(ps_z))
        metadata3d[model.MD_DIMS] = "ZYX"

        ret = DataArray(ret, metadata3d)

        return ret
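# A minimal, standalone numpy-only sketch (not the plugin's exact code) of what
# constructCube() produces: per-Z 2D frames stacked into a ZYX cube, with the 2D
# pixel size extended by the Z step. All values here are hypothetical.
import numpy

frames = [numpy.zeros((512, 512), dtype=numpy.uint16) for _ in range(5)]  # 5 Z levels
cube = numpy.stack(frames, axis=0)          # shape (Z, Y, X) = (5, 512, 512)
assert cube.shape == (5, 512, 512)

ps_x, ps_y = 1e-6, 1e-6                     # m/px (hypothetical 2D pixel size)
zstep = 0.5e-6                              # m (hypothetical Z step)
pixel_size_3d = (ps_x, ps_y, abs(zstep))    # corresponds to MD_PIXEL_SIZE above
# The cube centre along Z is roughly zstart + (zstep * number_of_levels) / 2,
# as computed for MD_POS in constructCube().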

    """
    The acquire function API is generic.
    Special functionality is added in the functions
    """

    def initAcquisition(self):
        """
        Called before acquisition begins.
        Returns: (float) estimate of time per step
        """
        logging.info("Z stack acquisition started with %d levels",
                     self.numberofAcquisitions.value)

        # Move the focus to the start z position
        logging.debug(
            "Preparing Z Stack acquisition. Moving focus to start position")
        self._old_pos = self.focus.position.value
        self.focus.moveAbs({'z': self.zstart.value}).result()
        self.focus.position.unsubscribe(
            self._on_focus_pos
        )  # to not update zstart when going through the steps
        self.zstart.unsubscribe(self._on_zstart)
        return self._estimate_step_duration()

    def stepAcquisition(self, i, images):
        """
        An action that executes for the ith step of the acquisition
        i (int): the step number
        images []: A list of images as DataArrays
        """
        self.focus.moveRel({'z': self.zstep.value}).result()

    def completeAcquisition(self, completed):
        """
        Run actions that clean up after the acquisition occurs.
        completed (bool): True if completed without trouble
        """
        # Move back to the start position
        if completed:
            logging.info("Z Stack acquisition complete.")
        logging.debug("Returning focus to start position %s", self._old_pos)
        self.focus.moveAbs(self._old_pos).result()
        self.focus.position.subscribe(self._on_focus_pos)
        self.zstart.subscribe(self._on_zstart)

    def postProcessing(self, images):
        """
        Post-process the images after the acquisition is done.
        images []: list of list of DataArrays (2D): first dim is the different streams,
        the second dimension is the different acquisition number.
        Returns: [list] list of a list of images that have been processed
        """
        cubes = [self.constructCube(ims) for ims in images]
        return cubes

    def acquire(self, dlg):
        """
        Acquisition operation.
        """
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberofAcquisitions.value
        ss = self._get_acq_streams()

        sacqt = acqmng.estimateTime(ss)

        completed = False

        try:
            step_time = self.initAcquisition()
            logging.debug("Acquisition streams: %s", ss)

            # TODO: if drift correction, use it over all the time
            f = model.ProgressiveFuture()
            f.task_canceller = lambda l: True  # To allow cancelling while it's running
            f.set_running_or_notify_cancel(
            )  # Indicate the work is starting now
            dlg.showProgress(f)

            # list of list of DataArray: for each stream, for each acquisition, the data acquired
            images = None

            for i in range(nb):
                left = nb - i
                dur = sacqt * left + step_time * (left - 1)

                logging.debug("Acquisition %d of %d", i, nb)

                startt = time.time()
                f.set_progress(end=startt + dur)
                das, e = acqmng.acquire(
                    ss, self.main_app.main_data.settings_obs).result()
                if images is None:
                    # Create one list of images per stream (on the first acquisition)
                    images = [[] for i in range(len(das))]

                for im, da in zip(images, das):
                    im.append(da)

                if f.cancelled():
                    raise CancelledError()

                # Prepare the next acquisition (e.g. move the focus by one step)
                self.stepAcquisition(i, images)

            f.set_result(None)  # Indicate it's over

            # Construct a cube from each stream's image.
            images = self.postProcessing(images)

            # Export image
            exporter = dataio.find_fittest_converter(self.filename.value)
            exporter.export(self.filename.value, images)
            completed = True
            dlg.Close()

        except CancelledError:
            logging.debug("Acquisition cancelled.")
            dlg.resumeSettings()

        except Exception as e:
            logging.exception(e)

        finally:
            # Do completion actions
            self.completeAcquisition(completed)
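# A minimal sketch of the remaining-time estimate used in the acquisition loop
# above: with `left` acquisitions still to run, each taking about `sacqt`
# seconds, plus one focus step of about `step_time` seconds between
# acquisitions (no step after the last one).
def estimate_remaining_time(left, sacqt, step_time):
    """Return the estimated remaining duration (s) for the Z stack."""
    return sacqt * left + step_time * (left - 1)

# e.g. 3 acquisitions of 1.2 s left and 0.5 s per focus move -> 4.6 s
assert abs(estimate_remaining_time(3, 1.2, 0.5) - 4.6) < 1e-9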
Example #19
def main(args):
    """
    Handles the command line arguments
    args is the list of arguments passed
    return (int): value to return to the OS as program exit code
    """
    # arguments handling
    parser = argparse.ArgumentParser(
        description="File format conversion utility")

    parser.add_argument('--version',
                        dest="version",
                        action='store_true',
                        help="show program's version number and exit")
    parser.add_argument("--input",
                        "-i",
                        dest="input",
                        help="name of the input file")
    parser.add_argument("--tiles",
                        "-t",
                        dest="tiles",
                        nargs="+",
                        help="list of files acquired in tiles to re-assemble")
    parser.add_argument(
        "--effcomp",
        dest="effcomp",
        help="name of a spectrum efficiency compensation table (in CSV format)"
    )
    fmts = dataio.get_available_formats(os.O_WRONLY)
    parser.add_argument(
        "--output",
        "-o",
        dest="output",
        help="name of the output file. "
        "The file format is derived from the extension (%s are supported)." %
        (" and ".join(fmts)))
    # TODO: automatically select pyramidal format if image > 4096px?
    parser.add_argument(
        "--pyramid",
        "-p",
        dest="pyramid",
        action='store_true',
        help="Export the data in pyramidal format. "
        "It takes about 2x more space, but allows to visualise large images. "
        "Currently, only the TIFF format supports this option.")
    parser.add_argument(
        "--minus",
        "-m",
        dest="minus",
        action='append',
        help=
        "name of an acquisition file whose data is subtracted from the input file."
    )
    parser.add_argument(
        "--weaver",
        "-w",
        dest="weaver",
        help=
        "name of weaver to be used during stitching. Options: 'mean': MeanWeaver "
        "(blend overlapping regions of adjacent tiles), 'collage': CollageWeaver "
        "(paste tiles as-is at calculated position)",
        choices=("mean", "collage", "collage_reverse"),
        default='mean')
    parser.add_argument(
        "--registrar",
        "-r",
        dest="registrar",
        help=
        "name of registrar to be used during stitching. Options: 'identity': IdentityRegistrar "
        "(place tiles at original position), 'shift': ShiftRegistrar (use cross-correlation "
        "algorithm to correct for suboptimal stage movement), 'global_shift': GlobalShiftRegistrar "
        "(uses cross-correlation algorithm with global optimization)",
        choices=("identity", "shift", "global_shift"),
        default="global_shift")

    # TODO: --export (spatial) image that defaults to a HFW corresponding to the
    # smallest image, and can be overridden by --hfw xxx (in µm).
    # TODO: --range parameter to select which image to select from the input
    #      (like: 1-4,5,6-10,12)

    options = parser.parse_args(args[1:])

    # Cannot use argparse's built-in version action, because it doesn't support multi-line output
    if options.version:
        print(odemis.__fullname__ + " " + odemis.__version__ + "\n" +
              odemis.__copyright__ + "\n" + "Licensed under the " +
              odemis.__license__)
        return 0

    infn = options.input
    tifns = options.tiles
    ecfn = options.effcomp
    outfn = options.output

    if not (infn or tifns or ecfn) or not outfn:
        raise ValueError(
            "--input/--tiles/--effcomp and --output arguments must be provided."
        )

    if sum(not not o for o in (infn, tifns, ecfn)) != 1:
        raise ValueError(
            "--input, --tiles, --effcomp cannot be provided simultaneously.")

    if infn:
        data, thumbs = open_acq(infn)
        logging.info("File contains %d %s (and %d %s)", len(data),
                     ngettext("image", "images", len(data)), len(thumbs),
                     ngettext("thumbnail", "thumbnails", len(thumbs)))
    elif tifns:
        registration_method = {
            "identity": REGISTER_IDENTITY,
            "shift": REGISTER_SHIFT,
            "global_shift": REGISTER_GLOBAL_SHIFT
        }[options.registrar]
        weaving_method = {
            "collage": WEAVER_COLLAGE,
            "mean": WEAVER_MEAN,
            "collage_reverse": WEAVER_COLLAGE_REVERSE
        }[options.weaver]
        data = stitch(tifns, registration_method, weaving_method)
        thumbs = []
        logging.info("File contains %d %s", len(data),
                     ngettext("stream", "streams", len(data)))
    elif ecfn:
        data = open_ec(ecfn)
        thumbs = []
        logging.info("File contains %d coefficients", data[0].shape[0])

    if options.minus:
        if thumbs:
            logging.info("Dropping thumbnail due to subtraction")
            thumbs = []
        for fn in options.minus:
            sdata, _ = open_acq(fn)
            data = minus(data, sdata)

    save_acq(outfn, data, thumbs, options.pyramid)

    logging.info("Successfully generated file %s", outfn)
Example #20
    def test_get_exporter(self):
        fmts = get_available_formats()
        for fmt in fmts:
            fmt_mng = get_exporter(fmt)
            self.assertGreaterEqual(len(fmt_mng.EXTENSIONS), 1)
Example #21
class MergeChannelsPlugin(Plugin):
    name = "Add RGB channels"
    __version__ = "1.0"
    __author__ = u"Victoria Mavrikopoulou"
    __license__ = "GPLv2"

    # The values are displayed in the following order
    vaconf = OrderedDict((
        ("filenameR", {
            "label": "Red channel",
            "control_type": odemis.gui.CONTROL_OPEN_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_RDONLY),
                                             include_all=True)[0]
        }),
        ("redShiftX", {"label": "   Red shift X"}),
        ("redShiftY", {"label": "   Red shift Y"}),
        ("filenameG", {
            "label": "Green channel",
            "control_type": odemis.gui.CONTROL_OPEN_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_RDONLY),
                                             include_all=True)[0]
        }),
        ("greenShiftX", {"label": "   Green shift X"}),
        ("greenShiftY", {"label": "   Green shift Y"}),
        ("filenameB", {
            "label": "Blue channel",
            "control_type": odemis.gui.CONTROL_OPEN_FILE,
            "wildcard": formats_to_wildcards(get_available_formats(os.O_RDONLY),
                                             include_all=True)[0]
        }),
        ("blueShiftX", {"label": "   Blue shift X"}),
        ("blueShiftY", {"label": "   Blue shift Y"}),
        ("cropBottom", {"label": "Crop bottom"}),
    ))

    def __init__(self, microscope, main_app):
        super(MergeChannelsPlugin, self).__init__(microscope, main_app)

        self.filenameR = model.StringVA(" ")
        self.filenameG = model.StringVA(" ")
        self.filenameB = model.StringVA(" ")
        self.redShiftX = model.FloatContinuous(0, range=(-500, 500), unit="px")
        self.redShiftY = model.FloatContinuous(0, range=(-500, 500), unit="px")
        self.greenShiftX = model.FloatContinuous(0,
                                                 range=(-500, 500),
                                                 unit="px")
        self.greenShiftY = model.FloatContinuous(0,
                                                 range=(-500, 500),
                                                 unit="px")
        self.blueShiftX = model.FloatContinuous(0,
                                                range=(-500, 500),
                                                unit="px")
        self.blueShiftY = model.FloatContinuous(0,
                                                range=(-500, 500),
                                                unit="px")
        self.cropBottom = model.IntContinuous(0, range=(0, 200), unit="px")

        analysis_tab = self.main_app.main_data.getTabByName('analysis')
        analysis_tab.stream_bar_controller.add_action("Add RGB channels...",
                                                      self.start)

        self.filenameR.subscribe(self._filenameR)
        self.filenameG.subscribe(self._filenameG)
        self.filenameB.subscribe(self._filenameB)
        self.cropBottom.subscribe(self._cropBottom)

        self._subscribers = []
        self._dlg = None
        self._stream_red = None
        self._stream_green = None
        self._stream_blue = None
        self._raw_orig = {
        }  # dictionary (Stream -> DataArray) to handle the (un)cropping

    def start(self):
        dlg = AcquisitionDialog(
            self,
            "Merging channels to RGB image",
            text="Insert 3 R, G, B files so that they are assigned the tints \n"
            "and are merged to an RGB image.")
        # remove the play overlay from the viewport
        dlg.viewport_l.canvas.remove_view_overlay(
            dlg.viewport_l.canvas.play_overlay)

        self._dlg = dlg
        dlg.addStream(None)
        dlg.Size = (1000, 600)

        dlg.addSettings(self, self.vaconf)
        dlg.addButton("Cancel", None)
        dlg.addButton("Add", self._updateViewer, face_colour='blue')

        dlg.pnl_gauge.Hide()
        dlg.ShowModal()  # Blocks until the window is closed

        # Destroy the dialog and reset the VAs and subscribers
        dlg.Destroy()
        self.filenameR.value = " "
        self.filenameG.value = " "
        self.filenameB.value = " "
        self.redShiftX.value = 0
        self.redShiftY.value = 0
        self.greenShiftX.value = 0
        self.greenShiftY.value = 0
        self.blueShiftX.value = 0
        self.blueShiftY.value = 0
        self.cropBottom.value = 0
        self._subscribers = []
        self._dlg = None
        self._raw_orig = {}

    def _filenameR(self, filenameR):
        """Open the filename that corresponds to RED channel. If an image is already inserted, remove the old stream
        and add the new stream in the Acquisition Dialog."""
        if self._stream_red is not None:
            self._removeStream(self._stream_red)
        self._stream_red = self._openImage(filenameR, TINT_RED, self.redShiftX,
                                           self.redShiftY)
        self._storeDir(filenameR)

    def _filenameG(self, filenameG):
        """Open the filename that corresponds to GREEN channel. If an image is already inserted, remove the old stream
        and add the new stream in the Acquisition Dialog."""
        if self._stream_green is not None:
            self._removeStream(self._stream_green)
        self._stream_green = self._openImage(filenameG, TINT_GREEN,
                                             self.greenShiftX,
                                             self.greenShiftY)
        self._storeDir(filenameG)

    def _filenameB(self, filenameB):
        """Open the filename that corresponds to BLUE channel. If an image is already inserted, remove the old stream
        and add the new stream in the Acquisition Dialog."""
        if self._stream_blue is not None:
            self._removeStream(self._stream_blue)
        self._stream_blue = self._openImage(filenameB, TINT_BLUE,
                                            self.blueShiftX, self.blueShiftY)
        self._storeDir(filenameB)

    def _storeDir(self, fn):
        """Store the directory of the given filename so as the next filename is in the same place"""
        path, bn = os.path.split(fn)
        files = [self.filenameR, self.filenameG, self.filenameB]
        for se in self._dlg.setting_controller.entries:
            if se.vigilattr in files:
                se.value_ctrl.default_dir = path

    def _openImage(self, filename, tint, shiftX, shiftY):
        """ Open the given filename and assign the tint of the corresponding channel. Add the stream to the dialog and
        apply the crop functionality. Two sliders are displayed for every image to provide the option of shifting the
        streams in x and y dimension. If there is no filename given return None.
        Args:
            filename(str) : the given filename with the R, G or B stream
            tint(tuple): the color tint to be assigned
            shiftX(ContinuousVA): shift x value in meters
            shiftY(ContinuousVA): shift y value in meters
        Returns (Stream or None): the displayed stream
        """
        if filename == " ":
            return None

        try:
            data = udataio.open_acquisition(filename)[0]
            pxs = data.metadata.get(model.MD_PIXEL_SIZE, (1e-06, 1e-06))
            if pxs[0] > 1e-04 or pxs[1] > 1e-04:
                data.metadata[model.MD_PIXEL_SIZE] = (1e-06, 1e-06)
                logging.warning(
                    "The given pixel size %s is too big, it got replaced to the default value %s",
                    pxs, (1e-06, 1e-06))
            data = self._ensureRGB(data, tint)
        except Exception as ex:
            logging.exception("Failed to open %s", filename)
            self._showErrorMessage("Failed to open image",
                                   "Failed to open image:\n%s" % (ex, ))
            return None

        basename, ext = os.path.splitext(os.path.split(filename)[1])
        stream_ch = stream.StaticFluoStream(basename, data)
        self._raw_orig[stream_ch] = data
        self._dlg.addStream(stream_ch)
        self._setupStreambar()

        self._cropBottom()
        self._connectShift(stream_ch, 0, shiftX)
        self._connectShift(stream_ch, 1, shiftY)

        return stream_ch

    @call_in_wx_main
    def _showErrorMessage(self, title, msg):
        """
        Shows an error message in a message box
        title (str)
        msg (str)
        """
        box = wx.MessageDialog(self._dlg, msg, title, wx.OK | wx.ICON_STOP)
        box.ShowModal()
        box.Destroy()

    def _ensureRGB(self, data, tint):
        """
        Ensure that the image is grayscale. If the image is a grayscale RGB, convert it
        to a 2D grayscale image and assign the corresponding tint to it.
        Update the metadata of the image.
        data (DataArray or DataArrayShadow): the input image
        return (DataArray): the resulting image with the assigned tint
        raises: ValueError if the image is an RGB image with different colour channels
        """
        if len(data.shape) > 3:
            raise ValueError("Image format not supported")
        if isinstance(data, model.DataArrayShadow):
            data = data.getData()
        if len(data.shape) == 3:
            data = img.ensureYXC(data)
            if (numpy.all(data[:, :, 0] == data[:, :, 1])
                    and numpy.all(data[:, :, 0] == data[:, :, 2])):
                data = data[:, :, 0]
                data.metadata[model.MD_DIMS] = "YX"
            else:
                raise ValueError("Coloured RGB image not supported")

        if model.MD_POS not in data.metadata:
            data.metadata[model.MD_POS] = (0, 0)
        if model.MD_PIXEL_SIZE not in data.metadata:
            data.metadata[model.MD_PIXEL_SIZE] = (1e-9, 1e-9)
        data.metadata[model.MD_USER_TINT] = tint

        return data
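# A minimal, standalone numpy-only sketch (separate from the class above, not
# its exact code) of the "grayscale RGB" check done in _ensureRGB(): if the
# three channels of a (Y, X, C) image are identical, keep a single channel as
# a 2D image, otherwise treat it as a genuinely coloured image.
import numpy

rgb = numpy.zeros((4, 4, 3), dtype=numpy.uint8)
rgb[..., 0] = rgb[..., 1] = rgb[..., 2] = 42      # identical channels
if (numpy.all(rgb[:, :, 0] == rgb[:, :, 1])
        and numpy.all(rgb[:, :, 0] == rgb[:, :, 2])):
    gray = rgb[:, :, 0]                           # 2D image, dims "YX"
else:
    raise ValueError("Coloured RGB image not supported")
assert gray.shape == (4, 4)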

    def _connectShift(self, stream, index, vashift):
        """Create listeners with information of the stream and the dimension.
        Hold a reference to the listeners to prevent automatic subscription"""
        va_on_shift = functools.partial(self._onShift, stream, index)
        self._subscribers.append(va_on_shift)
        vashift.subscribe(va_on_shift)

    def _removeStream(self, st):
        """Remove the given stream since another one is loaded from the user for display"""
        sconts = self._dlg.streambar_controller.stream_controllers
        for sc in sconts:
            if sc.stream is st:
                sc.stream_panel.on_remove_btn(st)
                del self._raw_orig[st]

    @call_in_wx_main
    def _setupStreambar(self):
        """Force stream panel to static mode. Needed for preventing user to play or
        remove streams from the stream panel"""
        sconts = self._dlg.streambar_controller.stream_controllers
        for sctrl in sconts:
            sctrl.stream_panel.to_static_mode()

    def _onShift(self, stream, i, value):
        """
        Update the stream after shifting it by the given value.
        Args:
            stream (StaticFluoStream): stream to be shifted
            i (int): dimension along which the stream is shifted (0 for x, 1 for y)
            value (float): shift value, in pixels
        """
        logging.debug("New shift = %f on stream %s", value, stream.name.value)
        poscor = stream.raw[0].metadata.get(model.MD_POS_COR, (0, 0))
        px_size = stream.raw[0].metadata[model.MD_PIXEL_SIZE]
        if i == 0:
            poscor = (-value * px_size[0], poscor[1])
        else:
            poscor = (poscor[0], -value * px_size[1])
        stream.raw[0].metadata[model.MD_POS_COR] = poscor
        self._forceUpdate(stream)
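# A minimal, standalone sketch (separate from the class above) of the shift
# handling in _onShift(): a shift expressed in pixels is converted to a metric
# position correction (MD_POS_COR) by multiplying by the pixel size, with the
# sign flipped, presumably so the displayed image moves in the expected
# direction. Values are hypothetical.
px_size = (1e-6, 2e-6)                    # m/px
shift_x_px = 10                           # value of a "shift X" slider, in px
poscor = (-shift_x_px * px_size[0], 0)    # position correction in m
assert abs(poscor[0] - (-1e-5)) < 1e-12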

    def _cropBottom(self, _=None):
        """Crop the data bar at the bottom of the image"""
        for st, r in self._raw_orig.items():
            prev_md = st.raw[0].metadata
            st.raw[0] = r[:max(1, r.shape[0] - self.cropBottom.value), :]
            st.raw[0].metadata = prev_md
            self._forceUpdate(st)

    def _forceUpdate(self, st):
        """Force updating the projection of the given stream"""
        views = [self._dlg.view]
        for v in views:
            for sp in v.stream_tree.getProjections():  # stream or projection
                if isinstance(sp, DataProjection):
                    s = sp.stream
                else:
                    s = sp
                if s is st:
                    sp._shouldUpdateImage()

    def _updateViewer(self, dlg):
        """Update the view in the Analysis Tab with the merged image.
        Called when the user clicks on Add, which closes the dialog"""
        views = [self._dlg.view]
        das = []
        for v in views:
            for st in v.stream_tree.getProjections():  # stream or projection
                if isinstance(st, DataProjection):
                    s = st.stream
                else:
                    s = st
                das.append(s.raw[0])

        analysis_tab = self.main_app.main_data.tab.value
        analysis_tab.display_new_data(self.filenameR.value, das, extend=True)

        dlg.Close()