Example #1
    def save(self, dlg):
        """
        Stores the current CL data into a TIFF/HDF5 file
        """
        f = model.ProgressiveFuture()

        try:
            das = self._acquire(dlg, f)
        except CancelledError:
            logging.debug("Stopping acquisition + export, as it was cancelled")
            return
        except Exception as e:
            logging.exception("Failed to acquire CL data: %s", e)
            return

        fn = self.filename.value
        bn, ext = splitext(fn)
        if ext == ".png":
            logging.debug("Using HDF5 instead of PNG")
            fn = bn + ".h5"
        exporter = dataio.find_fittest_converter(fn)

        try:
            exporter.export(fn, das)
        except Exception:
            logging.exception("Failed to store data in %s", fn)

        f.set_result(None)  # Indicate it's over
        self._update_filename()
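
A quick illustration of the extension swap above. This is a sketch, assuming the project's splitext() (whose behavior is pinned down by the tests in Example #5):

    bn, ext = splitext("clmap.png")   # -> ("clmap", ".png")
    fn = bn + ".h5"                   # -> "clmap.h5"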
Example #2
    def _on_browse(self, evt):
        current = self.GetValue() or ""
        if current and os.path.isdir(current):
            path = current
            bn = ""
        else:
            path, bn = os.path.split(current)
            if not (path and os.path.isdir(path)):
                path = self.default_dir
                bn = ""

        dlg = wx.FileDialog(self,
                            self.dialog_title,
                            path,
                            bn,
                            wildcard=self.wildcard,
                            style=self.dialog_style)

        if self.dialog_style & wx.FD_SAVE and bn != "":
            # Select the format corresponding to the current file extension
            all_exts = self._get_all_exts_from_wildcards(self.wildcard)
            for i, exts in enumerate(all_exts):
                if any(fnmatch.fnmatch(bn, ext) for ext in exts):
                    dlg.SetFilterIndex(i)
                    break
            else:
                logging.debug("File %s didn't match any extension", bn)

        if dlg.ShowModal() == wx.ID_OK:
            fullpath = dlg.GetPath()

            if self.dialog_style & wx.FD_SAVE:
                # Make sure the extension fits the selected file format
                exts = self._get_exts_from_wildcards(self.wildcard,
                                                     dlg.GetFilterIndex())
                for ext in exts:
                    if fnmatch.fnmatch(fullpath, ext) or ext == "*.*":
                        logging.debug("File %s matches extension %s", fullpath,
                                      ext)
                        break  # everything is fine
                else:
                    # Remove the current extension, and use a fitting one
                    bn, oldext = splitext(fullpath)
                    try:
                        newext = "." + exts[0].split(".", 1)[1]
                        fullpath = bn + newext
                        logging.debug("Adjusted file extension to %s", newext)
                    except Exception:
                        logging.exception("Failed to adjust filename to extensions %s", exts)

            self._SetValue(fullpath, raise_event=True)
            # Update default_dir so that if the value is cleared, we start from
            # this directory
            self.default_dir = os.path.dirname(fullpath)

        dlg.Destroy()
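
The wildcard helpers are not shown in this example. Below is a minimal sketch of the behavior assumed above, based on the wx wildcard syntax "description (*.ext)|*.ext|..."; the parsing details are assumptions, not the actual implementation:

    def _get_all_exts_from_wildcards(self, wildcards):
        # "TIFF files (*.tiff)|*.tiff|PNG files (*.png)|*.png" -> [["*.tiff"], ["*.png"]]
        patterns = wildcards.split("|")[1::2]  # every second entry is a pattern list
        return [p.split(";") for p in patterns]

    def _get_exts_from_wildcards(self, wildcards, index):
        # Patterns of the selected filter entry only
        return self._get_all_exts_from_wildcards(wildcards)[index]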
Example #3
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberOfAcquisitions.value
        p = self.period.value
        ss, last_ss = self._get_acq_streams()

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        sacqt = acq.estimateTime(ss)
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but the requested period between acquisitions is only %g s",
                sacqt, p
            )

        # TODO: if drift correction is active, apply it over the whole acquisition

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            if left == 1 and last_ss:
                ss += last_ss
                dur += acq.estimateTime(ss) - sacqt

            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acq.acquire(ss).result()
            if f.cancelled():
                dlg.resumeSettings()
                return

            exporter.export(fn_pat % (i,), das)

            # Wait for the requested period, except after the last acquisition
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info("Immediately starting next acquisition, %g s late", -sleept)

        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Close()
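
The numbered filename pattern built above expands as follows. Illustration only, assuming the project's splitext(), which keeps double extensions together:

    bs, ext = splitext("series.ome.tiff")  # -> ("series", ".ome.tiff")
    fn_pat = bs + "-%.5d" + ext
    fn_pat % (3,)                          # -> "series-00003.ome.tiff"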
Example #4
    def _fast_acquire_one(self, dlg, st, last_ss):
        """
        Acquires one stream, *as fast as possible* (ie, the period is not used).
        Only works with LiveStreams (and not with MDStreams)
        st (LiveStream)
        last_ss (list of Streams): all the streams to be acquired the last time
        """
        # Essentially, we trick the stream a little bit: we convince it that we
        # want a live view, but instead of displaying the data, we store it.
        # It's much faster because we don't have to stop/start the detector
        # between each acquisition.
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        self._acq_completed = threading.Event()

        f = model.ProgressiveFuture()
        f.task_canceller = self._cancel_fast_acquire
        f._stream = st
        if last_ss:
            nb -= 1
            extra_dur = acq.estimateTime([st] + last_ss)
        else:
            extra_dur = 0
        self._hijack_live_stream(st, f, nb, fn_pat, extra_dur)

        try:
            # Start acquisition and wait until it's done
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)
            st.is_active.value = True
            self._acq_completed.wait()

            if f.cancelled():
                dlg.resumeSettings()
                return
        finally:
            st.is_active.value = False  # just to be extra sure it's stopped
            logging.debug("Restoring stream %s", st)
            self._restore_live_stream(st)

        # last "normal" acquisition, if needed
        if last_ss:
            logging.debug("Acquiring last acquisition, with all the streams")
            ss = [st] + last_ss
            f.set_progress(end=time.time() + acq.estimateTime(ss))
            das, e = acq.acquire(ss, self.main_app.main_data.settings_obs).result()
            self._save_data(fn_pat % (nb, ), das)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over
Example #5
    def test_splitext(self):
        # input, output
        tio = (
            ("/home/test/booo.tiff.png", ("/home/test/booo.tiff", ".png")),
            (".test.doc", (".test", ".doc")),
            ("test.ome.tiff", ("test", ".ome.tiff")),
            ("ahhh....ome.tiff", ("ahhh...", ".ome.tiff")),
            (".bashrc", (".bashrc", "")),
        )

        for inp, eo in tio:
            ao = splitext(inp)
            self.assertEqual(ao, eo, "Unexpected output for '%s': %s" % (inp, ao))
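
These cases pin down the behavior: known double extensions such as ".ome.tiff" stay together, and the leading dot of a dotfile does not count as an extension. A minimal sketch that satisfies them; the real implementation and its table of double extensions may differ:

    import os.path

    DOUBLE_EXTS = (".ome.tiff",)  # assumed table of multi-part extensions

    def splitext(path):
        for dext in DOUBLE_EXTS:
            if path.lower().endswith(dext):
                return path[:-len(dext)], path[-len(dext):]
        return os.path.splitext(path)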
Example #6
    def _get_export_info(self):
        """
        Return str, str, str: full filename, exporter name, export type
          Full filename is None if cancelled by user
        """
        # Set default to the first of the list
        view = self._data_model.focussedView.value
        export_type = self.get_export_type(view)
        formats = EXPORTERS[export_type]
        if self._conf.export_raw:
            default_exporter = get_converter(formats[1][0])
        else:
            default_exporter = get_converter(formats[0][0])
        extension = default_exporter.EXTENSIONS[0]

        batch_export = False

        # Suggested name = current file name + stream/view name + extension of the default format
        fi = self._data_model.acq_fileinfo.value
        if fi is not None and fi.file_name:
            basename = os.path.basename(fi.file_name)
            # Remove the extension
            basename, _ = udataio.splitext(basename)

            # Use stream name, if there is just one stream, otherwise use the view name
            streams = view.getStreams()
            if len(streams) == 1:
                basename += " " + streams[0].name.value
            else:
                # TODO: remove numbers from the view name?
                basename += " " + view.name.value

            # Special batch export for AR view of polarization or polarimetry,
            # as they contain typically 6 or 24 images.
            if (export_type == 'AR' and streams
                    and (hasattr(streams[0], "polarimetry") or
                         (hasattr(streams[0], "polarization")
                          and len(streams[0].polarization.choices) > 1))):
                batch_export = True

        else:
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())

        filepath = os.path.join(self._conf.last_export_path,
                                basename + extension)
        # filepath will be None if cancelled by user

        return self.ShowExportFileDialog(filepath, default_exporter,
                                         batch_export)
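
EXPORTERS is assumed to map an export type to a pair of format-name lists, (displayed formats, raw formats), with the first entry of each list used as the default; the values below are hypothetical:

    EXPORTERS = {
        "spatial": (["PNG", "TIFF"], ["TIFF"]),
        "AR": (["PNG"], ["CSV"]),
    }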
Example #7
    def _on_filename(self, fn):
        """
        Warn if extension not .png, store path and pattern in conf file
        """
        bn, ext = splitext(fn)
        if not ext.endswith(".png") and not ALLOW_SAVE:
            logging.warning("Only PNG format is recommended to use")

        # Store the directory so that the next filename is in the same place
        p, bn = os.path.split(fn)
        if p:
            self.conf.last_path = p

        # Save pattern
        self.conf.fn_ptn, self.conf.fn_count = guess_pattern(fn)
Example #8
def export_ar_to_csv(fn, background=None):
    """
    fn (str): full path to the AR data file
    background (DataArray or None): background data to subtract
    """
    das = dataio.open_acquisition(fn)
    if not das:  # No such file or file doesn't contain data
        return

    streams = dataio.data_to_static_streams(das)

    # Remove the extension of the filename, to extend the name with .csv
    fn_base = dataio.splitext(fn)[0]
    ar_streams = [s for s in streams if isinstance(s, ARStream)]
    for s in ar_streams:
        try:
            s.background.value = background
        except Exception as ex:
            logging.error("Failed to use background data: %s", ex)

        ar_proj = stream.ARRawProjection(s)

        # Export every position separately
        for p in s.point.choices:
            if p == (None, None):  # Special "non-selected point" => not interesting
                continue
            s.point.value = p

            # Project to "raw" = Theta vs phi array
            exdata = img.ar_to_export_data([ar_proj], raw=True)

            # Pick a good name
            fn_csv = fn_base
            if len(ar_streams) > 1:  # Add the name of the stream
                fn_csv += "-" + s.name.value

            if len(s.point.choices) > 2:
                # More than one point in the stream => add position (in µm)
                fn_csv += f"-{p[0] * 1e6}-{p[1] * 1e6}"

            fn_csv += ".csv"

            # Save into a CSV file
            logging.info("Exporting point %s to %s", p, fn_csv)
            csv.export(fn_csv, exdata)
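
For illustration (hypothetical values): with base "ar-acq", more than one AR stream, and a point at (1e-06, 2e-06) m, the loop above builds a name like:

    fn_csv = "ar-acq" + "-" + "Stream 1" + f"-{1e-6 * 1e6}-{2e-6 * 1e6}" + ".csv"
    # -> approximately "ar-acq-Stream 1-1.0-2.0.csv"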
Example #9
    def _get_export_info(self):
        """
        Return str, str, str: full filename, exporter name, export type
          Full filename is None if cancelled by user
        """
        # Set default to the first of the list
        view = self._data_model.focussedView.value
        export_type = self.get_export_type(view)
        formats = EXPORTERS[export_type]
        if self._conf.export_raw:
            default_exporter = get_converter(formats[1][0])
        else:
            default_exporter = get_converter(formats[0][0])
        extension = default_exporter.EXTENSIONS[0]

        # Suggested name = current file name + stream/view name + extension of the default format
        fi = self._data_model.acq_fileinfo.value
        if fi is not None and fi.file_name:
            basename = os.path.basename(fi.file_name)
            # Remove the extension
            basename, _ = udataio.splitext(basename)

            # Use stream name, if there is just one stream, otherwise use the view name
            streams = view.getStreams()
            if len(streams) == 1:
                basename += " " + streams[0].name.value
            else:
                # TODO: remove numbers from the view name?
                basename += " " + view.name.value
        else:
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())

        filepath = os.path.join(self._conf.last_export_path, basename + extension)

        # filepath will be None if cancelled by user
        return self.ShowExportFileDialog(filepath, default_exporter)
Example #10
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = self._tab.streambar_controller
        str_ctrl.pauseStreams()
        dlg.pauseSettings()
        self._unsubscribe_vas()

        orig_pos = main_data.stage.position.value
        trep = (self.nx.value, self.ny.value)
        nb = trep[0] * trep[1]
        # It's not a big deal if it was a bad guess as we'll use the actual data
        # before the first move
        sfov = self._guess_smallest_fov()
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        fn_bs, fn_ext = udataio.splitext(fn)

        ss, stitch_ss = self._get_acq_streams()
        end = self.estimate_time() + time.time()

        ft = model.ProgressiveFuture(end=end)
        self.ft = ft  # allows future to be canceled in show_dlg after closing window
        ft.running_subf = model.InstantaneousFuture()
        ft._task_state = RUNNING
        ft._task_lock = threading.Lock()
        ft.task_canceller = self._cancel_acquisition  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # For stitching only
        da_list = []  # for each position, a list of DataArrays
        i = 0
        prev_idx = [0, 0]
        try:
            for ix, iy in self._generate_scanning_indices(trep):
                logging.debug("Acquiring tile %dx%d", ix, iy)
                self._move_to_tile((ix, iy), orig_pos, sfov, prev_idx)
                prev_idx = ix, iy
                # Update the progress bar
                ft.set_progress(end=self.estimate_time(nb - i) + time.time())

                ft.running_subf = acq.acquire(ss)
                das, e = ft.running_subf.result()  # blocks until all the acquisitions are finished
                if e:
                    logging.warning("Acquisition for tile %dx%d partially failed: %s",
                                    ix, iy, e)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                # TODO: do in a separate thread
                fn_tile = "%s-%.5dx%.5d%s" % (fn_bs, ix, iy, fn_ext)
                logging.debug("Will save data of tile %dx%d to %s", ix, iy, fn_tile)
                exporter.export(fn_tile, das)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                if self.stitch.value:
                    # Sort tiles (largest SEM in first position)
                    da_list.append(self.sort_das(das, stitch_ss))

                # Check the FoV is correct using the data, and if not update
                if i == 0:
                    sfov = self._check_fov(das, sfov)
                i += 1

            # Move stage to original position
            main_data.stage.moveAbs(orig_pos)

            # Stitch SEM and CL streams
            st_data = []
            if self.stitch.value and (not da_list or not da_list[0]):
                # if only AR or Spectrum are acquired
                logging.warning("No stream acquired that can be used for stitching.")
            elif self.stitch.value:
                logging.info("Acquisition completed, now stitching...")
                ft.set_progress(end=self.estimate_time(0) + time.time())

                logging.info("Computing big image out of %d images", len(da_list))
                das_registered = stitching.register(da_list)

                # Select weaving method
                # On a Sparc system the mean weaver gives the best result since it
                # smooths the transitions between tiles. However, using this weaver on the
                # Secom/Delphi generates an image with dark stripes in the overlap regions,
                # which are the result of carbon decomposition effects that typically occur
                # in samples imaged by these systems. To mitigate this, we use the
                # collage_reverse weaver, which only shows the overlap region of the tile
                # that was imaged first.
                if self.microscope.role in ("secom", "delphi"):
                    weaving_method = WEAVER_COLLAGE_REVERSE
                    logging.info("Using weaving method WEAVER_COLLAGE_REVERSE.")
                else:
                    weaving_method = WEAVER_MEAN
                    logging.info("Using weaving method WEAVER_MEAN.")

                # Weave every stream
                if isinstance(das_registered[0], tuple):
                    for s in range(len(das_registered[0])):
                        streams = []
                        for da in das_registered:
                            streams.append(da[s])
                        da = stitching.weave(streams, weaving_method)
                        da.metadata[model.MD_DIMS] = "YX"  # TODO: do it in the weaver
                        st_data.append(da)
                else:
                    da = stitching.weave(das_registered, weaving_method)
                    st_data.append(da)

                # Save
                exporter = dataio.find_fittest_converter(fn)
                if exporter.CAN_SAVE_PYRAMID:
                    exporter.export(fn, st_data, pyramid=True)
                else:
                    logging.warning("File format doesn't support saving image in pyramidal form")
                    exporter.export(fn, st_data)

            ft.set_result(None)  # Indicate it's over

            # End of the (completed) acquisition
            if ft._task_state == CANCELLED:
                raise CancelledError()
            dlg.Close()

            # Open analysis tab
            if st_data:
                self.showAcquisition(fn)

            # TODO: also export a full image (based on reported position, or based
            # on alignment detection)
        except CancelledError:
            logging.debug("Acquisition cancelled")
            dlg.resumeSettings()
        except Exception as ex:
            logging.exception("Acquisition failed.")
            ft.running_subf.cancel()
            ft.set_result(None)
            # Show also in the window. It will be hidden next time a setting is changed.
            self._dlg.setAcquisitionInfo("Acquisition failed: %s" % (ex,),
                                         lvl=logging.ERROR)
        finally:
            logging.info("Tiled acquisition ended")
            main_data.stage.moveAbs(orig_pos)
Example #11
    def export(self, dlg):
        """
        Stores the current CL data into a PNG file
        """
        f = model.ProgressiveFuture()

        # Note: the user never needs to store the raw data or the SEM data
        try:
            das = self._acquire(dlg, f)
        except CancelledError:
            logging.debug("Stopping acquisition + export, as it was cancelled")
            return
        except Exception as e:
            logging.exception("Failed to acquire CL data: %s", e)
            return

        exporter = dataio.find_fittest_converter(self.filename.value, allowlossy=True)

        ss = self._get_acq_streams()
        for s in ss:
            if len(ss) > 1:
                # Add a -StreamName after the filename
                bn, ext = splitext(self.filename.value)
                fn = bn + "-" + s.name.value + ext
            else:
                fn = self.filename.value

            # We actually don't care about the DAs, and will get the corresponding
            # .image, as it has been projected to RGB.
            rgbi = s.image.value
            try:
                while rgbi.metadata[model.MD_ACQ_DATE] < s.raw[0].metadata[model.MD_ACQ_DATE]:
                    logging.debug("Waiting for the RGB projection")
                    time.sleep(1)
                    rgbi = s.image.value
            except KeyError:
                # No date to check => let's hope it's fine
                pass

            try:
                if self.hasDatabar.value:
                    # Use MPP and FoV so that the whole image is displayed, at 1:1
                    view_pos = rgbi.metadata[model.MD_POS]
                    pxs = rgbi.metadata[model.MD_PIXEL_SIZE]
                    # Shape is YXC
                    view_hfw = rgbi.shape[1] * pxs[0], rgbi.shape[0] * pxs[1]
                    exdata = img.images_to_export_data([s],
                                                       view_hfw, view_pos,
                                                       draw_merge_ratio=1.0,
                                                       raw=False,
                                                       interpolate_data=False,
                                                       logo=self.main_app.main_frame.legend_logo)
                else:
                    exdata = rgbi

                exporter.export(fn, exdata)
            except Exception:
                logging.exception("Failed to store data in %s", fn)

        f.set_result(None)  # Indicate it's over
        self._update_filename()
Example #12
    def __init__(self,
                 streams,
                 stage,
                 area,
                 overlap,
                 settings_obs=None,
                 log_path=None,
                 future=None,
                 zlevels=None,
                 registrar=REGISTER_GLOBAL_SHIFT,
                 weaver=WEAVER_MEAN):
        """
        :param streams: (list of Streams) the streams to acquire
        :param stage: (Actuator) the sample stage to move to the possible tiles locations
        :param area: (float, float, float, float) left, top, right, bottom points of acquisition area
        :param overlap: (float) the amount of overlap between each acquisition
        :param settings_obs: (SettingsObserver or None) class that contains a list of all VAs
            that should be saved as metadata
        :param log_path: (string) directory and filename pattern to save acquired images for debugging
        :param future: (ProgressiveFuture or None) future to track progress, pass None for estimation only
        :param zlevels: (list(float) or None) focus z positions required for a zstack acquisition
        :param registrar: (REGISTER_*) type of registration method
        :param weaver: (WEAVER_*) type of weaving method
        """
        self._future = future
        self._streams = streams
        self._stage = stage
        # Get total area as a tuple of width, height from ltrb area points
        normalized_area = util.normalize_rect(area)
        if area[0] != normalized_area[0] or area[1] != normalized_area[1]:
            logging.warning("Acquisition area {} rearranged into {}".format(
                area, normalized_area))

        self._area = normalized_area
        height = normalized_area[3] - normalized_area[1]
        width = normalized_area[2] - normalized_area[0]
        self._area_size = (width, height)
        self._overlap = overlap

        if future:
            # Change the SEM stream horizontalFoV VA to the max if it's found
            for stream in self._streams:
                if model.hasVA(stream, "horizontalFoV"):
                    # Clip horizontal fov to total area in case it's smaller than max. value
                    stream.horizontalFoV.value = stream.horizontalFoV.clip(
                        max(self._area_size))

        # Get the smallest field of view
        self._sfov = self._guessSmallestFov(streams)
        logging.debug("Smallest FoV: %s", self._sfov)

        (self._nx, self._ny), self._starting_pos = self._getNumberOfTiles()

        # To use in re-focusing acquired images in case they fell out of focus
        # TODO: make adjust focus optional
        self._focus_stream = next(
            (sd for sd in self._streams if sd.focuser is not None), None)
        if self._focus_stream:
            # save initial focus value to be used in the AutoFocus function
            self._good_focus = self._focus_stream.focuser.position.value['z']
            focuser_range = self._focus_stream.focuser.axes['z'].range
            # Calculate the focus range by half the focus margin on each side of good focus
            focus_rng = (self._good_focus - FOCUS_RANGE_MARGIN / 2,
                         self._good_focus + FOCUS_RANGE_MARGIN / 2)
            # Clip values with focuser_range
            self._focus_rng = (max(focus_rng[0], focuser_range[0]),
                               min(focus_rng[1], focuser_range[1]))
            logging.debug("Calculated focus range = {}".format(self._focus_rng))

        # Rough estimate of the stage movement speed, for estimating the extra
        # duration due to movements
        self._move_speed = MOVE_SPEED_DEFAULT
        if model.hasVA(stage, "speed"):
            try:
                self._move_speed = (stage.speed.value["x"] +
                                    stage.speed.value["y"]) / 2
            except Exception as ex:
                logging.warning("Failed to read the stage speed: %s", ex)

        # TODO: allow to change the stage movement pattern
        self._settings_obs = settings_obs

        self._log_path = log_path
        if self._log_path:
            filename = os.path.basename(self._log_path)
            if not filename:
                raise ValueError("Filename is not found on log path.")
            self._exporter = dataio.find_fittest_converter(filename)
            self._fn_bs, self._fn_ext = udataio.splitext(filename)
            self._log_dir = os.path.dirname(self._log_path)

        if zlevels:
            if self._focus_stream is None:
                logging.warning(
                    "No focuser found in any of the streams, only one acquisition will be performed."
                )
            self._zlevels = zlevels
        else:
            self._zlevels = []

        self._registrar = registrar
        self._weaver = weaver
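
util.normalize_rect is assumed here to reorder the (l, t, r, b) points so that l <= r and t <= b, roughly:

    def normalize_rect(rect):
        l, t, r, b = rect
        return min(l, r), min(t, b), max(l, r), max(t, b)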
Example #13
    def __init__(self,
                 streams,
                 stage,
                 area,
                 overlap,
                 settings_obs=None,
                 log_path=None,
                 future=None,
                 zlevels=None,
                 registrar=REGISTER_GLOBAL_SHIFT,
                 weaver=WEAVER_MEAN,
                 focusing_method=FocusingMethod.NONE):
        """
        :param streams: (list of Streams) the streams to acquire
        :param stage: (Actuator) the sample stage to move to the possible tiles locations
        :param area: (float, float, float, float) left, top, right, bottom points of acquisition area
        :param overlap: (float) the amount of overlap between each acquisition
        :param settings_obs: (SettingsObserver or None) class that contains a list of all VAs
            that should be saved as metadata
        :param log_path: (string) directory and filename pattern to save acquired images for debugging
        :param future: (ProgressiveFuture or None) future to track progress, pass None for estimation only
        :param zlevels: (list(float) or None) focus z positions required zstack acquisition.
           Currently, can only be used if focusing_method == MAX_INTENSITY_PROJECTION.
        :param registrar: (REGISTER_*) type of registration method
        :param weaver: (WEAVER_*) type of weaving method
        :param focusing_method: (FocusingMethod) Defines when the autofocuser will be run.
           The autofocuser uses the first stream with a .focuser.
           If MAX_INTENSITY_PROJECTION is used, zlevels must be provided too.
        """
        self._future = future
        self._streams = streams
        self._stage = stage
        # Get total area as a tuple of width, height from ltrb area points
        normalized_area = util.normalize_rect(area)
        if area[0] != normalized_area[0] or area[1] != normalized_area[1]:
            logging.warning("Acquisition area {} rearranged into {}".format(
                area, normalized_area))

        self._area = normalized_area
        height = normalized_area[3] - normalized_area[1]
        width = normalized_area[2] - normalized_area[0]
        self._area_size = (width, height)
        self._overlap = overlap

        # Note: we used to change the stream horizontalFoV VA to the max, if available (eg, SEM).
        # However, it's annoying if the caller actually cares about the FoV (eg,
        # because it wants a large number of pixels, or there are artifacts at
        # the largest FoV). In addition, it's quite easy to do for the caller anyway.
        # Something like this:
        # for stream in self._streams:
        #     if model.hasVA(stream, "horizontalFoV"):
        #         stream.horizontalFoV.value = stream.horizontalFoV.clip(max(self._area_size))

        # Get the smallest field of view
        self._sfov = self._guessSmallestFov(streams)
        logging.debug("Smallest FoV: %s", self._sfov)

        (self._nx, self._ny), self._starting_pos = self._getNumberOfTiles()

        # To check and adjust the focus in between tiles
        if not isinstance(focusing_method, FocusingMethod):
            raise ValueError(
                f"focusing_method should be of type FocusingMethod, but got {focusing_method}"
            )
        self._focusing_method = focusing_method
        self._focus_stream = next(
            (sd for sd in self._streams if sd.focuser is not None), None)
        if self._focus_stream:
            # save initial focus value to be used in the AutoFocus function
            self._good_focus = self._focus_stream.focuser.position.value['z']
            focuser_range = self._focus_stream.focuser.axes['z'].range
            # Calculate the focus range by half the focus margin on each side of good focus
            focus_rng = (self._good_focus - FOCUS_RANGE_MARGIN / 2,
                         self._good_focus + FOCUS_RANGE_MARGIN / 2)
            # Clip values with focuser_range
            self._focus_rng = (max(focus_rng[0], focuser_range[0]),
                               min(focus_rng[1], focuser_range[1]))
            logging.debug("Calculated focus range = {}".format(self._focus_rng))

        if focusing_method == FocusingMethod.MAX_INTENSITY_PROJECTION and not zlevels:
            raise ValueError(
                "MAX_INTENSITY_PROJECTION requires zlevels, but none passed")
            # Note: a single zlevel is also allowed. It would not do MIP, but that
            # gives the caller the flexibility to explicitly disable MIP by passing
            # only one zlevel. Same if there is no focuser.

        if zlevels:
            if self._focus_stream is None:
                logging.warning(
                    "No focuser found in any of the streams, only one acquisition will be performed."
                )
            self._zlevels = zlevels
        else:
            self._zlevels = []

        if len(self._zlevels) > 1 and focusing_method != FocusingMethod.MAX_INTENSITY_PROJECTION:
            raise NotImplementedError(
                "Multiple zlevels currently only works with focusing method MAX_INTENSITY_PROJECTION"
            )

        # For "ON_LOW_FOCUS_LEVEL" method: a focus level which is corresponding to a in-focus image.
        self._good_focus_level = None  # float

        # Rough estimate of the stage movement speed, for estimating the extra
        # duration due to movements
        self._move_speed = MOVE_SPEED_DEFAULT
        if model.hasVA(stage, "speed"):
            try:
                self._move_speed = (stage.speed.value["x"] +
                                    stage.speed.value["y"]) / 2
            except Exception as ex:
                logging.warning("Failed to read the stage speed: %s", ex)

        # TODO: allow to change the stage movement pattern
        self._settings_obs = settings_obs

        self._log_path = log_path
        if self._log_path:
            filename = os.path.basename(self._log_path)
            if not filename:
                raise ValueError("Filename is not found on log path.")
            self._exporter = dataio.find_fittest_converter(filename)
            self._fn_bs, self._fn_ext = udataio.splitext(filename)
            self._log_dir = os.path.dirname(self._log_path)

        self._registrar = registrar
        self._weaver = weaver
Example #14
def guess_pattern(fn):
    """
    Generates a filename pattern from a given filename. The function detects certain
    formats of dates, times, and counters. It returns a string with replacement fields
    which can be used in create_filename to provide an updated version of the filename.
    input
        fn (String): filename (may include path and extension)
    returns
        fn_ptn (String): filename pattern (without path and extension)
        cnt (String): counter
    """

    _, fn_ptn = os.path.split(fn)

    # Get extension
    fn_ptn, _ = splitext(fn_ptn)

    # Escape curly braces, so they are not mistaken for replacement fields
    fn_ptn = re.sub('{', "{{", fn_ptn)
    fn_ptn = re.sub('}', "}}", fn_ptn)

    # Detect date
    # First check daterev, then datelng because sequence daterev + timelng might otherwise
    # be mistaken to be number + datelng + counter
    date_ptn = '[0-3][0-9](0[1-9]|1[0-2])%s' % time.strftime('%Y')
    fn_ptn = re.sub(date_ptn, "{daterev}", fn_ptn)

    date_ptn = '%s(0[1-9]|1[0-2])[0-3][0-9]' % time.strftime('%Y')
    fn_ptn = re.sub(date_ptn, "{datelng}", fn_ptn)

    date_ptn = '[0-3][0-9]-(0[1-9]|1[0-2])-%s' % time.strftime('%Y')
    fn_ptn = re.sub(date_ptn, "{daterev_hyphen}", fn_ptn)

    date_ptn = '%s-(0[1-9]|1[0-2])-[0-3][0-9]' % time.strftime('%Y')
    fn_ptn = re.sub(date_ptn, "{datelng_hyphen}", fn_ptn)

    year_ptn = '%s' % time.strftime('%Y')
    fn_ptn = re.sub(year_ptn, "{year}", fn_ptn)

    # Detect time h-min-s
    time_ptn = '[0-2][0-9][0-5][0-9][0-5][0-9]'
    fn_ptn = re.sub(time_ptn, "{timelng}", fn_ptn)

    time_ptn = '[0-2][0-9]:[0-5][0-9]:[0-5][0-9]'
    fn_ptn = re.sub(time_ptn, "{timelng_colon}", fn_ptn)

    time_ptn = '[0-2][0-9]-[0-5][0-9]-[0-5][0-9]'
    fn_ptn = re.sub(time_ptn, "{timelng_hyphen}", fn_ptn)

    # Detect time h-min
    time_ptn = '[0-2][0-9]:[0-5][0-9]'
    fn_ptn = re.sub(time_ptn, "{timeshrt_colon}", fn_ptn)

    # Don't allow short times separated by a hyphen, as that causes problems with some names

    # If 4-digit number corresponds to time in an interval of +- 15 min
    # around the current time, recognize it as time pattern, otherwise
    # assume it's a counter.
    time_ptn = '[0-2][0-9][0-5][0-9]'
    tm = re.search(time_ptn, fn_ptn)
    if tm:
        h = int(tm.group()[:2])
        m = int(tm.group()[2:])
        t_s = h * 3600 + m * 60
        cur_t_s = int(time.strftime('%H')) * 3600 + int(time.strftime('%M')) * 60
        if cur_t_s - 900 < t_s < cur_t_s + 900:
            fn_ptn = re.sub(time_ptn, "{timeshrt}", fn_ptn)

    # Detect count
    cnt_ptn = r'\d{1,5}'
    cnt_m = None
    for m in re.finditer(cnt_ptn, fn_ptn):
        cnt_m = m
    # if multiple numbers are present, use last
    if cnt_m:
        cnt = cnt_m.group()
        fn_ptn = fn_ptn[:cnt_m.start()] + "{cnt}" + fn_ptn[cnt_m.end():]
    else:
        cnt = "001"  # will be used in case cnt pattern is added afterwards

    # If neither time nor count is specified, add a count. If only a short time
    # (h, min) is specified and no count, add seconds to make the filename unique.
    # Doesn't behave properly if the user enters terms with curly braces like {{cnt}}.
    if '{cnt}' not in fn_ptn:
        if '{timeshrt}' in fn_ptn:
            fn_ptn = re.sub('{timeshrt}', '{timelng}', fn_ptn)
        if '{timeshrt_colon}' in fn_ptn:
            fn_ptn = re.sub('{timeshrt_colon}', '{timelng_colon}', fn_ptn)
        elif ("{timelng}" in fn_ptn or "{timelng_colon}" in fn_ptn
              or "{timelng_hyphen}" in fn_ptn):
            pass
        else:
            fn_ptn = fn_ptn + '-{cnt}'

    return fn_ptn, cnt
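
A usage sketch, assuming it runs during 2015 and not within 15 min of 00:12, so the 4-digit number is treated as a counter rather than a time:

    guess_pattern("test-20151221-0012.tiff")
    # -> ("test-{datelng}-{cnt}", "0012")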
Example #15
    def __init__(self,
                 streams,
                 stage,
                 area,
                 overlap,
                 settings_obs=None,
                 log_path=None,
                 future=None):
        """
        :param streams: (list of Streams) the streams to acquire
        :param stage: (Actuator) the sample stage to move to the possible tiles locations
        :param area: (float, float, float, float) left, top, right, bottom points of acquisition area
        :param overlap: (float) the amount of overlap between each acquisition
        :param settings_obs: (SettingsObserver or None) class that contains a list of all VAs
            that should be saved as metadata
        :param log_path: (string) directory and filename pattern to save acquired images for debugging
        :param future: (ProgressiveFuture or None) future to track progress, pass None for estimation only
        """
        self._future = future
        self._streams = streams

        # Get total area as a tuple of width, height from ltrb area points
        area = util.normalize_rect(area)
        height = area[3] - area[1]
        width = area[2] - area[0]
        self._total_area = (width, height)

        if future:
            # Change the SEM stream horizontalFoV VA to the max if it's found
            for stream in self._streams:
                if model.hasVA(stream, "horizontalFoV"):
                    # Set to the maximum of the range (range is (min, max))
                    stream.horizontalFoV.value = stream.horizontalFoV.range[1]
                    # Clip horizontal fov to total area in case it's smaller than max. value
                    stream.horizontalFoV.value = stream.horizontalFoV.clip(
                        max(self._total_area))
        # Get the smallest field of view
        self._sfov = self._guessSmallestFov(streams)

        self._overlap = overlap
        self._nx, self._ny = self._getNumberOfTiles()

        # To use in re-focusing acquired images in case they fell out of focus
        self._focus_stream = next(
            (sd for sd in self._streams if sd.focuser is not None), None)
        if self._focus_stream:
            # save initial focus value to be used in the AutoFocus function
            self._good_focus = self._focus_stream.focuser.position.value['z']
            focuser_range = self._focus_stream.focuser.axes['z'].range
            # Calculate the focus range by half the focus margin on each side of good focus
            focus_rng = (self._good_focus - FOCUS_RANGE_MARGIN / 2,
                         self._good_focus + FOCUS_RANGE_MARGIN / 2)
            # Clip values with focuser_range
            self._focus_rng = (max(focus_rng[0], focuser_range[0]),
                               min((focus_rng[1], focuser_range[1])))
            logging.debug("Calculated focus range ={}".format(self._focus_rng))

        self._stage = stage
        self._starting_pos = {'x': area[0], 'y': area[1]}  # left, top
        # TODO: allow to change the stage movement pattern
        self._settings_obs = settings_obs

        self._log_path = log_path
        if self._log_path:
            filename = os.path.basename(self._log_path)
            if not filename:
                raise ValueError("Filename is not found on log path.")
            self._exporter = dataio.find_fittest_converter(filename)
            self._fn_bs, self._fn_ext = udataio.splitext(filename)
            self._log_dir = os.path.dirname(self._log_path)
Example #16
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = self._tab.streambar_controller
        str_ctrl.pauseStreams()
        dlg.pauseSettings()
        self._unsubscribe_vas()

        orig_pos = main_data.stage.position.value
        trep = (self.nx.value, self.ny.value)
        nb = trep[0] * trep[1]
        # It's not a big deal if it was a bad guess as we'll use the actual data
        # before the first move
        sfov = self._guess_smallest_fov()
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        fn_bs, fn_ext = udataio.splitext(fn)

        ss, stitch_ss = self._get_acq_streams()
        end = self.estimate_time() + time.time()

        ft = model.ProgressiveFuture(end=end)
        self.ft = ft  # allows future to be canceled in show_dlg after closing window
        ft.running_subf = model.InstantaneousFuture()
        ft._task_state = RUNNING
        ft._task_lock = threading.Lock()
        ft.task_canceller = self._cancel_acquisition  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # For stitching only
        da_list = []  # for each position, a list of DataArrays
        i = 0
        prev_idx = [0, 0]
        try:
            for ix, iy in self._generate_scanning_indices(trep):
                logging.debug("Acquiring tile %dx%d", ix, iy)
                self._move_to_tile((ix, iy), orig_pos, sfov, prev_idx)
                prev_idx = ix, iy
                # Update the progress bar
                ft.set_progress(end=self.estimate_time(nb - i) + time.time())

                ft.running_subf = acqmng.acquire(ss, self.main_app.main_data.settings_obs)
                das, e = ft.running_subf.result()  # blocks until all the acquisitions are finished
                if e:
                    logging.warning(
                        "Acquisition for tile %dx%d partially failed: %s", ix,
                        iy, e)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                # TODO: do in a separate thread
                fn_tile = "%s-%.5dx%.5d%s" % (fn_bs, ix, iy, fn_ext)
                logging.debug("Will save data of tile %dx%d to %s", ix, iy,
                              fn_tile)
                exporter.export(fn_tile, das)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                if self.stitch.value:
                    # Sort tiles (largest sem on first position)
                    da_list.append(self.sort_das(das, stitch_ss))

                # Check the FoV is correct using the data, and if not update
                if i == 0:
                    sfov = self._check_fov(das, sfov)
                i += 1

            # Move stage to original position
            main_data.stage.moveAbs(orig_pos)

            # Stitch SEM and CL streams
            st_data = []
            if self.stitch.value and (not da_list or not da_list[0]):
                # if only AR or Spectrum are acquired
                logging.warning(
                    "No stream acquired that can be used for stitching.")
            elif self.stitch.value:
                logging.info("Acquisition completed, now stitching...")
                ft.set_progress(end=self.estimate_time(0) + time.time())

                logging.info("Computing big image out of %d images",
                             len(da_list))
                das_registered = stitching.register(da_list)

                # Select weaving method
                # On a Sparc system the mean weaver gives the best result since it
                # smooths the transitions between tiles. However, using this weaver on the
                # Secom/Delphi generates an image with dark stripes in the overlap regions,
                # which are the result of carbon decomposition effects that typically occur
                # in samples imaged by these systems. To mitigate this, we use the
                # collage_reverse weaver, which only shows the overlap region of the tile
                # that was imaged first.
                if self.microscope.role in ("secom", "delphi"):
                    weaving_method = WEAVER_COLLAGE_REVERSE
                    logging.info(
                        "Using weaving method WEAVER_COLLAGE_REVERSE.")
                else:
                    weaving_method = WEAVER_MEAN
                    logging.info("Using weaving method WEAVER_MEAN.")

                # Weave every stream
                if isinstance(das_registered[0], tuple):
                    for s in range(len(das_registered[0])):
                        streams = []
                        for da in das_registered:
                            streams.append(da[s])
                        da = stitching.weave(streams, weaving_method)
                        da.metadata[model.MD_DIMS] = "YX"  # TODO: do it in the weaver
                        st_data.append(da)
                else:
                    da = stitching.weave(das_registered, weaving_method)
                    st_data.append(da)

                # Save
                exporter = dataio.find_fittest_converter(fn)
                if exporter.CAN_SAVE_PYRAMID:
                    exporter.export(fn, st_data, pyramid=True)
                else:
                    logging.warning("File format doesn't support saving image in pyramidal form")
                    exporter.export(fn, st_data)

            ft.set_result(None)  # Indicate it's over

            # End of the (completed) acquisition
            if ft._task_state == CANCELLED:
                raise CancelledError()
            dlg.Close()

            # Open analysis tab
            if st_data:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display stitched image")
                self.showAcquisition(fn)
            else:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display last tile")
                # It's easier to know the last filename, and it's also the most
                # interesting one for the user: if something went wrong (eg, focus),
                # the last tile is the most likely to show it.
                self.showAcquisition(fn_tile)

            # TODO: also export a full image (based on reported position, or based
            # on alignment detection)
        except CancelledError:
            logging.debug("Acquisition cancelled")
            dlg.resumeSettings()
        except Exception as ex:
            logging.exception("Acquisition failed.")
            ft.running_subf.cancel()
            ft.set_result(None)
            # Show also in the window. It will be hidden next time a setting is changed.
            self._dlg.setAcquisitionInfo("Acquisition failed: %s" % (ex, ),
                                         lvl=logging.ERROR)
        finally:
            logging.info("Tiled acquisition ended")
            main_data.stage.moveAbs(orig_pos)
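
The weaver choice above is the heart of the stitching step. As a minimal standalone sketch of the same register/weave flow, assuming the odemis.acq.stitching API used by the example (the helper name and its arguments are illustrative, not part of the original code):

from odemis.acq import stitching
from odemis.acq.stitching import WEAVER_COLLAGE_REVERSE, WEAVER_MEAN


def stitch_tiles(da_list, role):
    """Register a list of tile DataArrays and weave them into a single image."""
    # Refine the relative tile positions based on the image content
    das_registered = stitching.register(da_list)
    # Same trade-off as in the example: collage_reverse hides the dark overlap
    # stripes seen on the Secom/Delphi, while the mean weaver smooths the seams
    # on other systems.
    if role in ("secom", "delphi"):
        method = WEAVER_COLLAGE_REVERSE
    else:
        method = WEAVER_MEAN
    return stitching.weave(das_registered, method)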
Example #23
0
    def export_viewport(self, filepath, export_format, export_type):
        """ Export the image from the focused view to the filesystem.

        :param filepath: (str) full path to the destination file
        :param export_format: (str) the format name
        :param export_type: (str) spatial, AR, spectrum or spectrum-line
        """
        try:
            exporter = get_converter(export_format)
            raw = export_format in EXPORTERS[export_type][1]
            self._conf.export_raw = raw
            self._conf.last_export_path = os.path.dirname(filepath)

            exported_data = self.export(export_type, raw)

            # batch export
            # TODO: for now we do not create a new folder where all files are saved

            if isinstance(exported_data, dict):
                # get the file names
                filename_dict = {}
                n_exist = 0
                dir_name, base = os.path.split(filepath)
                base_name, file_extension = splitext(base)
                for key in exported_data.keys():
                    # use the filename defined by the user, with the key (eg, the
                    # polarization mode) appended
                    filename = os.path.join(
                        dir_name, base_name + "_" + key + file_extension)

                    # detect whether we'd overwrite an existing file => show our own warning
                    if os.path.exists(filename):
                        n_exist += 1

                    filename_dict[key] = filename

                if n_exist:
                    dlg = wx.MessageDialog(
                        self._main_frame,
                        "Some files (%d/%d) already exists.\n"
                        "Do you want to replace them?" %
                        (n_exist, len(filename_dict)), "Files already exist",
                        wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
                    ret = dlg.ShowModal()
                    dlg.Destroy()
                    if ret == wx.ID_NO:
                        return

                # record everything to a file
                for key, data in exported_data.items():
                    exporter.export(filename_dict[key], data)

                # TODO: needs to be adapted for batch export, as it's now redundant
                popup.show_message(
                    self._main_frame,
                    "Images exported",
                    "Stored as %s" % (os.path.join(
                        dir_name, base_name + "_" + "xxx" + file_extension), ),
                    timeout=3)

                logging.info(
                    "Exported a %s view into files with name of type '%s'.",
                    export_type,
                    os.path.join(dir_name,
                                 base_name + "_" + "mode" + file_extension))

            else:  # single file export
                exporter.export(filepath, exported_data)

                popup.show_message(self._main_frame,
                                   "Image exported",
                                   "Stored in %s" % (filepath, ),
                                   timeout=3)

                logging.info("Exported a %s view into file '%s'.", export_type,
                             filepath)
        except LookupError as ex:
            logging.info("Export of a %s view as %s seems to contain no data.",
                         export_type,
                         export_format,
                         exc_info=True)
            dlg = wx.MessageDialog(
                self._main_frame, "Failed to export: %s\n"
                "Please make sure that at least one stream is visible in the current view."
                % (ex, ), "No data to export", wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
        except Exception:
            logging.exception("Failed to export a %s view as %s", export_type,
                              export_format)
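
The batch branch above derives one output file per entry of the exported-data dict by splicing the dict key between the base name and the extension. The same naming scheme in isolation (the helper name and the sample keys below are hypothetical):

import os


def per_key_filenames(filepath, keys):
    """Map each export key to '<base>_<key><ext>' in the same directory."""
    dir_name, base = os.path.split(filepath)
    base_name, ext = os.path.splitext(base)
    return {key: os.path.join(dir_name, "%s_%s%s" % (base_name, key, ext))
            for key in keys}

# per_key_filenames("/tmp/acq.tiff", ["pol-h", "pol-v"]) would return
# {'pol-h': '/tmp/acq_pol-h.tiff', 'pol-v': '/tmp/acq_pol-v.tiff'}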
Example #24
0
def guess_pattern(fn):
    """
    Generates a filename pattern from a given filename. The function detects certain
    formats of dates, times, and counters. It returns a string with replacement fields
    which can be used in create_filename to provide an updated version of the filename.
    input
        fn (String): filename (may include path and extension)
    returns
        fn_ptn (String): filename pattern (without path and extension)
        cnt (String): counter
    """

    _, fn_ptn = os.path.split(fn)

    # Get extension
    fn_ptn, _ = splitext(fn_ptn)

    # Escape literal curly braces, so they are not interpreted as replacement fields
    fn_ptn = re.sub('{', "{{", fn_ptn)
    fn_ptn = re.sub('}', "}}", fn_ptn)

    # Detect date
    # First check daterev, then datelng, because the sequence daterev + timelng might
    # otherwise be mistaken for number + datelng + counter
    date_ptn = time.strftime("%d%m%Y")
    fn_ptn = re.sub(date_ptn, "{daterev}", fn_ptn)

    date_ptn = time.strftime("%Y%m%d")
    fn_ptn = re.sub(date_ptn, "{datelng}", fn_ptn)

    date_ptn = time.strftime("%d-%m-%Y")
    fn_ptn = re.sub(date_ptn, "{daterev_hyphen}", fn_ptn)

    date_ptn = time.strftime("%Y-%m-%d")
    fn_ptn = re.sub(date_ptn, "{datelng_hyphen}", fn_ptn)

    # Short dates second, so they only match if the long version didn't
    date_ptn = time.strftime("%d%m%y")
    fn_ptn = re.sub(date_ptn, "{dshrtrev}", fn_ptn)

    date_ptn = time.strftime("%y%m%d")
    fn_ptn = re.sub(date_ptn, "{dateshrt}", fn_ptn)

    date_ptn = time.strftime("%d-%m-%y")
    fn_ptn = re.sub(date_ptn, "{dshrtrev_hyphen}", fn_ptn)

    date_ptn = time.strftime("%y-%m-%d")
    fn_ptn = re.sub(date_ptn, "{dateshrt_hyphen}", fn_ptn)

    year_ptn = time.strftime('%Y')
    fn_ptn = re.sub(year_ptn, "{year}", fn_ptn)

    # Detect time; it must be within 5 min of the current time, to reduce the risk
    # of collisions
    now = datetime.now()
    for name_ptn, time_ptn_re, time_ptn_strp in [
        ("{timelng}", "[0-2][0-9][0-5][0-9][0-5][0-9]", "%H%M%S"),
        ("{timelng_colon}", "[0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "%H:%M:%S"),
        ("{timelng_hyphen}", "[0-2][0-9]-[0-5][0-9]-[0-5][0-9]", "%H-%M-%S"),
        # We used to support timeshrt without separation and with hyphen, but they
        # too easily collided with other meanings for the users, so we dropped them.
        ("{timeshrt_colon}", "[0-2][0-9]:[0-5][0-9]", "%H:%M")
    ]:
        m = re.search(time_ptn_re, fn_ptn)
        if m:
            time_match = m.group()
            # Use the current date, with the provided time
            time_found = datetime.combine(
                now.date(),
                datetime.strptime(time_match, time_ptn_strp).time())

            if abs(time_found - now) < timedelta(minutes=5):
                # That's a real match => change the pattern
                fn_ptn = fn_ptn[0:m.start()] + name_ptn + fn_ptn[m.end():]

    # Detect count
    cnt_ptn = r'\d{1,5}'
    cnt_m = None
    for m in re.finditer(cnt_ptn, fn_ptn):
        cnt_m = m
    # if multiple numbers are present, use the last one
    if cnt_m:
        cnt = cnt_m.group()
        fn_ptn = fn_ptn[:cnt_m.start()] + "{cnt}" + fn_ptn[cnt_m.end():]
    else:
        cnt = "001"  # will be used in case cnt pattern is added afterwards

    # If neither time nor count is specified, add a count; if only a short time
    # (h, min) is specified and no count, add seconds to make the filename unique.
    # Doesn't behave properly if the user enters terms with curly braces like {{cnt}}.
    if '{cnt}' not in fn_ptn:
        if '{timeshrt_colon}' in fn_ptn:
            fn_ptn = re.sub('{timeshrt_colon}', '{timelng_colon}', fn_ptn)
        elif ("{timelng}" in fn_ptn or "{timelng_colon}" in fn_ptn
              or "{timelng_hyphen}" in fn_ptn):
            pass
        else:
            fn_ptn = fn_ptn + '-{cnt}'

    return fn_ptn, cnt
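
To illustrate how the detection behaves in practice, here is a hypothetical call; note that the date and time replacement fields are only produced when the filename embeds the current date, or a time within 5 minutes of now:

fn_ptn, cnt = guess_pattern("/data/acq-20230815-003.tiff")
# Run on 2023-08-15, this yields ("acq-{datelng}-{cnt}", "003"): the date
# becomes {datelng} and the last number becomes the counter {cnt}.
# On other days only the fragments matching the current date or year (if
# any) are replaced, while the trailing counter is still detected.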