Example #1
    def test_aligned_stream(self):
        """
        Test the AlignedSEMStream
        """
        # Use fake ccd in order to have just one spot
        ccd = mock.FakeCCD(self.fake_img)

        # first try using the metadata correction
        st = stream.AlignedSEMStream("sem-md", self.sed, self.sed.data, self.ebeam,
                                     ccd, self.stage, self.focus, shiftebeam=stream.MTD_MD_UPD)

        # we don't really care about the SEM image, so the faster the better
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # start one image acquisition (so it should do the calibration)
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 30 s")

        # Check the correction metadata is there
        md = self.sed.getMetadata()
        self.assertIn(model.MD_POS_COR, md)

        # Check the position of the image is correct
        pos_cor = md[model.MD_POS_COR]
        pos_dict = self.stage.position.value
        pos = (pos_dict["x"], pos_dict["y"])
        exp_pos = tuple(p - c for p, c in zip(pos, pos_cor))
        imd = received[0].metadata
        self.assertEqual(exp_pos, imd[model.MD_POS])

        # Check the calibration doesn't happen again on a second acquisition
        bad_cor = (-1, -1) # stupid impossible value
        self.sed.updateMetadata({model.MD_POS_COR: bad_cor})
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 10 s")

        # if calibration has happened (=bad), it has changed the metadata
        md = self.sed.getMetadata()
        self.assertEqual(bad_cor, md[model.MD_POS_COR],
                            "metadata has been updated while it shouldn't have")

        # Check calibration happens again after a stage move
        f = self.stage.moveRel({"x": 100e-6})
        f.result() # make sure the move is over
        time.sleep(0.1)  # make sure the stream had time to detect that the position has changed

        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 30 s")

        # if calibration has happened (=good), it has changed the metadata
        md = self.sed.getMetadata()
        self.assertNotEqual(bad_cor, md[model.MD_POS_COR],
                            "metadata hasn't been updated while it should have")

        ccd.terminate()
Example #2
    def test_aligned_stream(self):
        """
        Test the AlignedSEMStream
        """
        # Use fake ccd in order to have just one spot
        ccd = mock.FakeCCD(self.fake_img)

        # first try using the metadata correction
        st = stream.AlignedSEMStream("sem-md", self.sed, self.sed.data, self.ebeam,
                                     ccd, self.stage, self.focus, shiftebeam=stream.MTD_MD_UPD)

        # we don't really care about the SEM image, so the faster the better
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # start one image acquisition (so it should do the calibration)
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 30 s")

        # Check the correction metadata is there
        md = self.sed.getMetadata()
        self.assertIn(model.MD_POS_COR, md)

        # Check the position of the image is correct
        pos_cor = md[model.MD_POS_COR]
        pos_dict = self.stage.position.value
        pos = (pos_dict["x"], pos_dict["y"])
        exp_pos = tuple(p - c for p, c in zip(pos, pos_cor))
        imd = received[0].metadata
        self.assertEqual(exp_pos, imd[model.MD_POS])

        # Check the calibration doesn't happen again on a second acquisition
        bad_cor = (-1, -1) # stupid impossible value
        self.sed.updateMetadata({model.MD_POS_COR: bad_cor})
        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 10 s")

        # if calibration has happened (=bad), it has changed the metadata
        md = self.sed.getMetadata()
        self.assertEqual(bad_cor, md[model.MD_POS_COR],
                            "metadata has been updated while it shouldn't have")

        # Check calibration happens again after a stage move
        f = self.stage.moveRel({"x": 100e-6})
        f.result() # make sure the move is over
        time.sleep(0.1)  # make sure the stream had time to detect that the position has changed

        f = acq.acquire([st])
        received, _ = f.result()
        self.assertTrue(received, "No image received after 30 s")

        # if calibration has happened (=good), it has changed the metadata
        md = self.sed.getMetadata()
        self.assertNotEqual(bad_cor, md[model.MD_POS_COR],
                            "metadata hasn't been updated while it should have")

        ccd.terminate()
Example #3
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        try:
            str_ctrl = self.main_app.main_data.tab.value.streambar_controller
        except AttributeError: # Odemis v2.6 and earlier versions
            str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        strs = []
        if self._survey_s:
            strs.append(self._survey_s)

        strs.append(self._ARspectral_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        try:
            f = acq.acquire(strs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass

        if not f.cancelled() and das:
            if e:
                logging.warning("AR spectral scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)

        dlg.Close()
Example #4
    def on_acquisition(self, evt):
        """
        Start the acquisition (really)
        Similar to win.acquisition.on_acquire()
        """
        self._pause_streams()

        self.btn_acquire.Disable()
        self.btn_cancel.Enable()

        self.gauge_acq.Show()
        self.btn_cancel.Show()

        self._main_data_model.is_acquiring.value = True

        # FIXME: probably not the whole window is required, just the file settings
        self._tab_panel.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        streams = self._tab_data_model.acquisitionView.getStreams()

        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(self.acq_future,
                                                                self.gauge_acq,
                                                                self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)
Example #5
    def acquire(self, dlg):
        # Configure the monochromator stream according to the settings
        # TODO: read the value from spotPosition instead?
        self._mchr_s.emtTranslation.value = self.ebeam.translation.value
        strs = []
        if self._survey_s:
            strs.append(self._survey_s)
        strs.append(self._mchr_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        try:
            # opm is the optical path manager, which ensures the path is set to the monochromator
            f = acq.acquire(strs, opm=self.main_app.main_data.opm)
            dlg.showProgress(f)
            das, e = f.result()
        finally:
            str_ctrl.resumeStreams(stream_paused)

        if not f.cancelled() and das:
            if e:
                logging.warning("Monochromator scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            exporter.export(fn, das)

            self.showAcquisition(fn)

        dlg.Destroy()
Example #6
    def on_acquire(self, evt):
        """
        Start the acquisition (really)
        """
        self.btn_secom_acquire.Disable()

        # disable estimation time updates during acquisition
        view = self._tab_data_model.focussedView.value
        view.lastUpdate.unsubscribe(self.on_streams_changed)

        # TODO: freeze all the settings so that it's not possible to change anything
        self._pause_settings()

        self.gauge_acq.Show()
        self.Layout() # to put the gauge at the right place

        # start acquisition + connect events to callback
        streams = self._tab_data_model.focussedView.value.getStreams()

        # Add the overlay stream if the fine alignment check box is checked
        if self.chkbox_fine_align.Value:
            streams.add(self._ovrl_stream)

        # It should never be possible to reach here with no streams
        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(self.acq_future,
                                                                self.gauge_acq,
                                                                self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)

        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
Example #7
    def test_metadata(self):
        """
        Check if extra metadata are saved
        """
        settings_obs = SettingsObserver(model.getComponents())
        self.ccd.binning.value = (1, 1)  # make sure we don't save the right metadata by accident
        detvas = {'exposureTime', 'binning', 'gain'}
        s1 = stream.FluoStream("fluo2",
                               self.ccd,
                               self.ccd.data,
                               self.light,
                               self.light_filter,
                               detvas=detvas)
        s2 = stream.BrightfieldStream("bf",
                                      self.ccd,
                                      self.ccd.data,
                                      self.light,
                                      detvas=detvas)

        # Set different binning values for each stream
        s1.detBinning.value = (2, 2)
        s2.detBinning.value = (4, 4)
        st = stream.StreamTree(streams=[s1, s2])
        f = acq.acquire(st.getProjections(), settings_obs=settings_obs)
        data, e = f.result()
        for s in data:
            self.assertTrue(model.MD_EXTRA_SETTINGS in s.metadata,
                            "Stream %s didn't save extra metadata." % s)
        self.assertEqual(data[0].metadata[model.MD_EXTRA_SETTINGS][self.ccd.name]['binning'],
                         [(2, 2), 'px'])
        self.assertEqual(data[1].metadata[model.MD_EXTRA_SETTINGS][self.ccd.name]['binning'],
                         [(4, 4), 'px'])
Example #8
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        try:
            str_ctrl = self.main_app.main_data.tab.value.streambar_controller
        except AttributeError:  # Odemis v2.6 and earlier versions
            str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        strs = []
        if self._survey_s:
            strs.append(self._survey_s)

        strs.append(self._ARspectral_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        try:
            f = acq.acquire(strs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass

        if not f.cancelled() and das:
            if e:
                logging.warning("AR spectral scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)

        dlg.Close()
Example #9
    def acquire(self, dlg):
        # Configure the monochromator stream according to the settings
        # TODO: read the value from spotPosition instead?
        self._mchr_s.emtTranslation.value = self.ebeam.translation.value
        strs = []
        if self._survey_s:
            strs.append(self._survey_s)
        strs.append(self._mchr_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        try:
            # opm is the optical path manager, which ensures the path is set to the monochromator
            f = acq.acquire(strs, opm=self.main_app.main_data.opm)
            dlg.showProgress(f)
            das, e = f.result()
        except CancelledError:
            pass
        finally:
            str_ctrl.resumeStreams(stream_paused)

        if not f.cancelled() and das:
            if e:
                logging.warning("Monochromator scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            exporter.export(fn, das)

            self.showAcquisition(fn)

        dlg.Destroy()
Example #10
    def on_acquisition(self, evt):
        """
        Start the acquisition (really)
        Similar to win.acquisition.on_acquire()
        """
        # Time-resolved data cannot be saved in .ome.tiff format for now
        # OME-TIFF saves each time point on a separate "page", which results in too many pages.
        has_temporal = False
        for s in self._tab_data_model.streams.value:
            if isinstance(s, (ScannedTemporalSettingsStream,
                              ScannedTCSettingsStream,
                              TemporalSpectrumSettingsStream)):
                has_temporal = True

        if self.conf.last_format in ('TIFF', 'Serialized TIFF') and has_temporal:
            raise NotImplementedError("Cannot save temporal data in %s format, data format must be HDF5."
                                      % self.conf.last_format)

        self._pause_streams()

        self.btn_acquire.Disable()
        self.btn_cancel.Enable()
        self._main_data_model.is_acquiring.value = True

        self.gauge_acq.Show()
        self.btn_cancel.Show()
        self._show_status_icons(None)
        self._tab_panel.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        self.acq_future = acq.acquire(self._tab_data_model.acquisitionStreams)
        self._acq_future_connector = ProgressiveFutureConnector(
            self.acq_future, self.gauge_acq, self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)
Example #11
    def test_cancel(self):
        """
        try a bit the cancelling possibility
        """
        # create a little complex streamTree
        st = stream.StreamTree(streams=[
            self.streams[2],
            stream.StreamTree(streams=self.streams[0:2])
        ])
        self.start = None
        self.end = None
        self.updates = 0
        self.done = False

        f = acq.acquire(st.getProjections())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        time.sleep(0.5)  # make sure it's started
        self.assertTrue(f.running())
        f.cancel()

        self.assertRaises(CancelledError, f.result, 1)
        self.assertGreaterEqual(self.updates, 1)  # at least one update at cancellation
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(self.done)
        self.assertTrue(f.cancelled())
Example #12
    def on_acquisition(self, evt):
        """
        Start the acquisition (really)
        Similar to win.acquisition.on_acquire()
        """
        # Time-resolved data cannot be saved in .ome.tiff format for now
        # OME-TIFF saves each time point on a separate "page", which results in too many pages.
        has_temporal = False
        for s in self._tab_data_model.streams.value:
            if (isinstance(s, ScannedTemporalSettingsStream) or
                isinstance(s, ScannedTCSettingsStream) or
                isinstance(s, TemporalSpectrumSettingsStream)):
                has_temporal = True

        if (self.conf.last_format == 'TIFF' or self.conf.last_format == 'Serialized TIFF') and has_temporal:
            raise NotImplementedError("Cannot save temporal data in %s format, data format must be HDF5."
                                      % self.conf.last_format)

        self._pause_streams()

        self.btn_acquire.Disable()
        self.btn_cancel.Enable()
        self._main_data_model.is_acquiring.value = True

        self.gauge_acq.Show()
        self.btn_cancel.Show()
        self._show_status_icons(None)
        self._tab_panel.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        self.acq_future = acq.acquire(self._tab_data_model.acquisitionStreams)
        self._acq_future_connector = ProgressiveFutureConnector(self.acq_future,
                                                                self.gauge_acq,
                                                                self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)
Example #13
    def test_acq_fine_align(self):
        """
        try acquisition with SEM + Optical + overlay streams
        """
        # Create the streams
        sems = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
        # SEM settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        fs1 = stream.FluoStream("test orange", self.ccd, self.ccd.data,
                                self.light, self.light_filter)
        fs1.excitation.value = fs1.excitation.range[0] + 5e-9
        fs1.emission.value = fs1.emission.range[0] + 5e-9
        fs2 = stream.FluoStream("test blue", self.ccd, self.ccd.data,
                                self.light, self.light_filter)
        fs2.excitation.value = fs2.excitation.range[1] - 5e-9
        fs2.emission.value = fs2.emission.range[1] - 5e-9
        self.ccd.exposureTime.value = 0.1 # s

        ovrl = stream.OverlayStream("overlay", self.ccd, self.ebeam, self.sed)
        ovrl.dwellTime.value = 0.3
        ovrl.repetition.value = (7, 7)

        streams = [sems, fs1, fs2, ovrl]
        est_time = acq.estimateTime(streams)

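        # The overall estimate should be at least as long as the sum of each
        # stream's own acquisition time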
        sum_est_time = sum(s.estimateAcquisitionTime() for s in streams)
        self.assertGreaterEqual(est_time, sum_est_time)

        # prepare callbacks
        self.past = None
        self.left = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(streams)
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data = f.result()
        dur = time.time() - start
        self.assertGreater(dur, est_time / 2) # Estimated time shouldn't be too small

        self.assertIsInstance(data[0], model.DataArray)
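        # The overlay stream produces no DataArray of its own: it only updates
        # the metadata of the other streams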
        self.assertEqual(len(data), len(streams) - 1)

        # No overlay correction metadata anywhere (it has all been merged)
        for d in data:
            for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]:
                self.assertNotIn(k, d.metadata)

#        thumb = acq.computeThumbnail(st, f)
#        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1) # at least one update at end
        self.assertEqual(self.left, 0)
        self.assertEqual(self.done, 1)
        self.assertTrue(not f.cancelled())
Example #14
    def test_cancel(self):
        """
        try a bit the cancelling possibility
        """
        # create a little complex streamTree
        st = stream.StreamTree(streams=[
                self.streams[2],
                stream.StreamTree(streams=self.streams[0:2])
                ])
        self.start = None
        self.end = None
        self.updates = 0
        self.done = False

        f = acq.acquire(st.getStreams())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        time.sleep(0.5) # make sure it's started
        self.assertTrue(f.running())
        f.cancel()

        self.assertRaises(CancelledError, f.result, 1)
        self.assertGreaterEqual(self.updates, 1) # at least one update at cancellation
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(self.done)
        self.assertTrue(f.cancelled())
Example #15
    def test_simple(self):
        # create a simple streamTree
        st = stream.StreamTree(streams=[self.streams[0]])
        f = acq.acquire(st.getStreams())
        data = f.result()
        self.assertIsInstance(data[0], model.DataArray)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        # let's do it a second time, "just for fun"
        f = acq.acquire(st.getStreams())
        data = f.result()
        self.assertIsInstance(data[0], model.DataArray)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)
Example #16
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberOfAcquisitions.value
        p = self.period.value
        ss, last_ss = self._get_acq_streams()

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        sacqt = acq.estimateTime(ss)
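        # Idle time between two acquisitions, so that one starts every "p" seconds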
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p
            )

        # TODO: if drift correction, use it over all the time

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
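            # On the last iteration, also acquire the streams requested only for the end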
            if left == 1 and last_ss:
                ss += last_ss
                dur += acq.estimateTime(ss) - sacqt

            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acq.acquire(ss).result()
            if f.cancelled():
                dlg.resumeSettings()
                return

            exporter.export(fn_pat % (i,), das)

            # Wait the requested period, except after the last acquisition
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info("Immediately starting next acquisition, %g s late", -sleept)

        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Close()
Example #17
    def _fast_acquire_one(self, dlg, st, last_ss):
        """
        Acquires one stream, *as fast as possible* (ie, the period is not used).
        Only works with LiveStreams (and not with MDStreams)
        st (LiveStream)
        last_ss (list of Streams): all the streams to be acquired on the last iteration
        """
        # Essentially, we trick the stream a little bit, by convincing it that
        # we want a live view, but instead of displaying the data, we store it.
        # It's much faster because we don't have to stop/start the detector between
        # each acquisition.
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        self._acq_completed = threading.Event()

        f = model.ProgressiveFuture()
        f.task_canceller = self._cancel_fast_acquire
        f._stream = st
        if last_ss:
            nb -= 1
            extra_dur = acq.estimateTime([st] + last_ss)
        else:
            extra_dur = 0
        self._hijack_live_stream(st, f, nb, fn_pat, extra_dur)

        try:
            # Start acquisition and wait until it's done
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)
            st.is_active.value = True
            self._acq_completed.wait()

            if f.cancelled():
                dlg.resumeSettings()
                return
        finally:
            st.is_active.value = False  # just to be extra sure it's stopped
            logging.debug("Restoring stream %s", st)
            self._restore_live_stream(st)

        # last "normal" acquisition, if needed
        if last_ss:
            logging.debug("Acquiring last acquisition, with all the streams")
            ss = [st] + last_ss
            f.set_progress(end=time.time() + acq.estimateTime(ss))
            das, e = acq.acquire(ss, self.main_app.main_data.settings_obs).result()
            self._save_data(fn_pat % (nb,), das)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over
Example #18
    def test_sync_sem_ccd(self):
        """
        try acquisition with fairly complex SEM/CCD stream
        """
        # Create the streams and streamTree
        semsur = stream.SEMStream("test sem", self.sed, self.sed.data,
                                  self.ebeam)
        sems = stream.SEMStream("test sem cl", self.sed, self.sed.data,
                                self.ebeam)
        ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data,
                                      self.ebeam)
        semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
        st = stream.StreamTree(streams=[semsur, semars])

        # SEM survey settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # SEM/AR settings are via the AR stream
        ars.roi.value = (0.1, 0.1, 0.8, 0.8)
        mx_brng = self.ccd.binning.range[1]
        binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
        self.ccd.binning.value = binning
        self.ccd.exposureTime.value = 1  # s
        ars.repetition.value = (2, 3)
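        # Total number of CCD (AR) images expected: one per e-beam position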
        num_ar = numpy.prod(ars.repetition.value)

        est_time = acq.estimateTime(st.getProjections())

        # prepare callbacks
        self.start = None
        self.end = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(st.getProjections())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data, e = f.result()
        dur = time.time() - start
        self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
        self.assertIsInstance(data[0], model.DataArray)
        self.assertIsNone(e)
        self.assertEqual(len(data), num_ar + 2)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1)  # at least one update at end
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(not f.cancelled())

        time.sleep(0.1)
        self.assertEqual(self.done, 1)
Example #19
    def test_sync_sem_ccd(self):
        """
        try acquisition with fairly complex SEM/CCD stream
        """
        # Create the streams and streamTree
        semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
        sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
        ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam)
        semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
        st = stream.StreamTree(streams=[semsur, semars])

        # SEM survey settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # SEM/AR settings are via the AR stream
        ars.roi.value = (0.1, 0.1, 0.8, 0.8)
        mx_brng = self.ccd.binning.range[1]
        binning = tuple(min(4, mx) for mx in mx_brng) # try binning 4x4
        self.ccd.binning.value = binning
        self.ccd.exposureTime.value = 1 # s
        ars.repetition.value = (2, 3)
        num_ar = numpy.prod(ars.repetition.value)

        est_time = acq.estimateTime(st.getProjections())

        # prepare callbacks
        self.start = None
        self.end = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(st.getProjections())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data, e = f.result()
        dur = time.time() - start
        self.assertGreaterEqual(dur, est_time / 2) # Estimated time shouldn't be too small
        self.assertIsInstance(data[0], model.DataArray)
        self.assertIsNone(e)
        self.assertEqual(len(data), num_ar + 2)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1) # at least one update at end
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(not f.cancelled())

        time.sleep(0.1)
        self.assertEqual(self.done, 1)
Example #20
    def test_metadata(self):
        """
        Check if extra metadata are saved
        """
        settings_obs = SettingsObserver(model.getComponents())

        detvas = {"binning", "exposureTime"}
        sems = stream.SEMStream("test sem", self.sed, self.sed.data,
                                self.ebeam)
        specs = stream.SpectrumSettingsStream("test spec",
                                              self.spec,
                                              self.spec.data,
                                              self.ebeam,
                                              detvas=detvas)
        sps = stream.SEMSpectrumMDStream("test sem-spec", [sems, specs])

        specs.roi.value = (0, 0, 1, 1)
        specs.repetition.value = (2, 3)
        specs.detBinning.value = (2, specs.detBinning.value[1])
        specs.detExposureTime.value = 0.1

        specs2 = stream.SpectrumSettingsStream("test spec2",
                                               self.spec,
                                               self.spec.data,
                                               self.ebeam,
                                               detvas=detvas)
        sps2 = stream.SEMSpectrumMDStream("test sem-spec2", [sems, specs2])

        specs2.roi.value = (0, 0, 1, 1)
        specs2.repetition.value = (2, 3)
        specs2.detBinning.value = (4, specs2.detBinning.value[1])
        specs2.detExposureTime.value = 0.05

        f = acq.acquire([sps, sps2], settings_obs)
        data = f.result()

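        # Each SEM+spectrum stream yields its SEM image followed by its spectrum
        # data, so the spectra of the two streams are at indices 1 and 3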
        spec1_data = data[0][1]
        spec2_data = data[0][3]
        self.assertEqual(spec1_data.metadata[model.MD_EXTRA_SETTINGS][self.spec.name]['binning'],
                         [(2, specs.detBinning.value[1]), 'px'])
        self.assertEqual(spec2_data.metadata[model.MD_EXTRA_SETTINGS][self.spec.name]['binning'],
                         [(4, specs2.detBinning.value[1]), 'px'])
        self.assertEqual(spec1_data.metadata[model.MD_EXTRA_SETTINGS][self.spec.name]['exposureTime'],
                         [0.1, 's'])
        self.assertEqual(spec2_data.metadata[model.MD_EXTRA_SETTINGS][self.spec.name]['exposureTime'],
                         [0.05, 's'])
Example #21
    def _acquire(self, dlg):
        """
        Starts the synchronized acquisition, pauses the currently playing streams and exports the
        acquired SEM data. Opens the survey, concurrent and first optical image in the analysis tab.
        :param dlg: (AcquisitionDialog) The plugin window.
        """
        self._dlg.streambar_controller.pauseStreams()
        self.save_hw_settings()

        self.fns = []

        strs = [self._survey_stream, self._secom_sem_cl_stream]

        fn = self.filename.value
        fn_prefix, fn_ext = os.path.splitext(self.filename.value)

        try:
            f = acq.acquire(strs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass
        finally:
            self.resume_hw_settings()

        if not f.cancelled() and das:
            if e:
                logging.warning("SECOM CL acquisition failed: %s", e)
            logging.debug("Will save CL data to %s", fn)

            # export the SEM images
            self.save_data(das,
                           prefix=fn_prefix,
                           xres=self.repetition.value[0],
                           yres=self.repetition.value[1],
                           xstepsize=self.pixelSize.value[0] * 1e9,
                           ystepsize=self.pixelSize.value[1] * 1e9,
                           idx=0)

            # Open analysis tab, with 3 files
            self.showAcquisition(self._secom_sem_cl_stream.firstOptImg)
            analysis_tab = self.main_data.getTabByName('analysis')
            for fn_img in self.fns:
                analysis_tab.load_data(fn_img, extend=True)

        dlg.Close()
Example #22
    def on_acquire(self, evt):
        """ Start the actual acquisition """
        if self.last_saved_file:  # This means the button is actually "View"
            self._view_file()
            return

        logging.info("Acquire button clicked, starting acquisition")
        self.acquiring = True

        self.btn_secom_acquire.Disable()

        # disable estimation time updates during acquisition
        self._view.lastUpdate.unsubscribe(self.on_streams_changed)

        # Freeze all the settings so that it's not possible to change anything
        self._pause_settings()

        self.gauge_acq.Show()
        self.Layout()  # to put the gauge at the right place

        # For now, always indicate the best quality (even if the preset is set
        # to "live")
        if self._main_data_model.opm:
            self._main_data_model.opm.setAcqQuality(path.ACQ_QUALITY_BEST)

        # Note: It should never be possible to reach here with no streams
        streams = self.get_acq_streams()
        v_streams = self._view.getStreams()  # visible streams
        for s in streams:
            # Add extra viewable streams to view. However, do not add incompatible streams.
            if s not in v_streams and not isinstance(s, NON_SPATIAL_STREAMS):
                self._view.addStream(s)

            # Update the filename in the streams
            if hasattr(s, "filename"):
                pathname, base = os.path.split(self.filename.value)
                s.filename.value = base

        self.acq_future = acq.acquire(streams,
                                      self._main_data_model.settings_obs)
        self._acq_future_connector = ProgressiveFutureConnector(
            self.acq_future, self.gauge_acq, self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)

        self.btn_cancel.SetLabel("Cancel")
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
Example #23
    def on_acquire(self, evt):
        """ Start the actual acquisition """
        if self.last_saved_file:  # This means the button is actually "View"
            self._view_file()
            return

        logging.info("Acquire button clicked, starting acquisition")
        self.acquiring = True

        self.btn_secom_acquire.Disable()

        # disable estimation time updates during acquisition
        self._view.lastUpdate.unsubscribe(self.on_streams_changed)

        # Freeze all the settings so that it's not possible to change anything
        self._pause_settings()

        self.gauge_acq.Show()
        self.Layout()  # to put the gauge at the right place

        # For now, always indicate the best quality (even if the preset is set
        # to "live")
        if self._main_data_model.opm:
            self._main_data_model.opm.setAcqQuality(path.ACQ_QUALITY_BEST)

        # Note: It should never be possible to reach here with no streams
        streams = self.get_acq_streams()
        v_streams = self._view.getStreams()  # visible streams
        for s in streams:
            # Add extra viewable streams to view. However, do not add incompatible streams.
            if s not in v_streams and not isinstance(s, NON_SPATIAL_STREAMS):
                self._view.addStream(s)

            # Update the filename in the streams
            if hasattr(s, "filename"):
                pathname, base = os.path.split(self.filename.value)
                s.filename.value = base

        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(self.acq_future,
                                                                self.gauge_acq,
                                                                self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)

        self.btn_cancel.SetLabel("Cancel")
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
Example #24
    def acquire(self, dlg):
        nb = self.numberOfAcquisitions.value
        p = self.period.value
        sacqt = self._stream.estimateAcquisitionTime()
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p)

        exporter = dataio.find_fittest_converter(self.filename.value)

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        das = []
        for i in range(nb):
            left = nb - i
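            # Time left: the remaining acquisitions, plus the idle time between them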
            dur = sacqt * left + intp * (left - 1)
            startt = time.time()
            f.set_progress(end=startt + dur)
            d, e = acq.acquire([self._stream]).result()
            das.extend(d)
            if f.cancelled():
                return

            # Wait the requested period, except after the last acquisition
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info(
                        "Immediately starting next acquisition, %g s late",
                        -sleept)

        exporter.export(self.filename.value, das)
        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Destroy()
Example #25
    def test_progress(self):
        """
        Check we get some progress updates
        """
        # create a little complex streamTree
        st = stream.StreamTree(streams=[
                self.streams[0],
                stream.StreamTree(streams=self.streams[1:3])
                ])
        self.past = None
        self.left = None
        self.updates = 0

        f = acq.acquire(st.getStreams())
        f.add_update_callback(self.on_progress_update)

        data = f.result()
        self.assertIsInstance(data[0], model.DataArray)
        self.assertGreaterEqual(self.updates, 3) # at least one update per stream
Example #26
    def acquire(self, dlg):
        nb = self.numberOfAcquisitions.value
        p = self.period.value
        sacqt = self._stream.estimateAcquisitionTime()
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p
            )

        exporter = dataio.find_fittest_converter(self.filename.value)

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        das = []
        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            startt = time.time()
            f.set_progress(end=startt + dur)
            d, e = acq.acquire([self._stream]).result()
            das.extend(d)
            if f.cancelled():
                return

            # Wait the requested period, except after the last acquisition
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info("Immediately starting next acquisition, %g s late", -sleept)

        exporter.export(self.filename.value, das)
        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Destroy()
Example #27
    def on_acquire(self, evt):
        """ Start the actual acquisition """
        if self.last_saved_file:  # This means the button is actually "View"
            self._view_file()
            return

        logging.info("Acquire button clicked, starting acquisition")

        self.btn_secom_acquire.Disable()

        # disable estimation time updates during acquisition
        self._view.lastUpdate.unsubscribe(self.on_streams_changed)

        # TODO: freeze all the settings so that it's not possible to change anything
        self._pause_settings()

        self.gauge_acq.Show()
        self.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        streams = self._view.getStreams()

        # Add the overlay stream if the fine alignment check box is checked
        if self.chkbox_fine_align.Value:
            streams.append(self._ovrl_stream)

        # Turn off the fan to avoid vibrations (in all acquisitions)
        self._set_fan(False)

        # It should never be possible to reach here with no streams
        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(self.acq_future,
                                                                self.gauge_acq,
                                                                self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)

        self.btn_cancel.SetLabel("Cancel")
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
Example #28
    def _acquire(self, dlg, future):
        # Stop the streams
        dlg.streambar_controller.pauseStreams()

        # Acquire (even if it was live, to be sure the data is up-to-date)
        ss = self._get_acq_streams()
        dur = acq.estimateTime(ss)
        startt = time.time()
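        # Placeholder sub-future, so that cancelling before the real acquisition
        # starts still has something to act on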
        future._cur_f = InstantaneousFuture()
        future.task_canceller = self._acq_canceller
        future.set_running_or_notify_cancel()  # Indicate the work is starting now
        future.set_progress(end=startt + dur)
        dlg.showProgress(future)

        future._cur_f = acq.acquire(ss)
        das, e = future._cur_f.result()
        if future.cancelled():
            raise CancelledError()

        if e:
            raise e

        return das
Example #29
    def on_acquisition(self, evt):
        """
        Start the acquisition (really)
        Similar to win.acquisition.on_acquire()
        """
        self._pause_streams()

        self.btn_acquire.Disable()
        self.btn_cancel.Enable()
        self._main_data_model.is_acquiring.value = True

        self.gauge_acq.Show()
        self.btn_cancel.Show()
        self._show_status_icons(None)
        self._tab_panel.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        streams = self._tab_data_model.acquisitionView.getStreams()

        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(
            self.acq_future, self.gauge_acq, self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)
Example #30
    def on_acquire(self, evt):
        """ Start the actual acquisition """
        if self.last_saved_file:  # This means the button is actually "View"
            self._view_file()
            return

        logging.info("Acquire button clicked, starting acquisition")

        self.btn_secom_acquire.Disable()

        # disable estimation time updates during acquisition
        self._view.lastUpdate.unsubscribe(self.on_streams_changed)

        # TODO: freeze all the settings so that it's not possible to change anything
        self._pause_settings()

        self.gauge_acq.Show()
        self.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        streams = self._view.getStreams()

        # Add the overlay stream if the fine alignment check box is checked
        if self.chkbox_fine_align.Value:
            streams.append(self._ovrl_stream)

        # Turn off the fan to avoid vibrations (in all acquisitions)
        self._set_fan(False)

        # It should never be possible to reach here with no streams
        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(
            self.acq_future, self.gauge_acq, self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)

        self.btn_cancel.SetLabel("Cancel")
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_cancel)
Example #31
    def on_acquisition(self, evt):
        """
        Start the acquisition (really)
        Similar to win.acquisition.on_acquire()
        """
        self._pause_settings()
        self.btn_acquire.Disable()
        self.btn_cancel.Enable()

        self.gauge_acq.Show()
        self.btn_cancel.Show()

        self._main_data_model.is_acquiring.value = True

        # FIXME: probably not the whole window is required, just the file settings
        self._main_frame.Layout()  # to put the gauge at the right place

        # start acquisition + connect events to callback
        streams = self._tab_data_model.acquisitionView.getStreams()

        self.acq_future = acq.acquire(streams)
        self._acq_future_connector = ProgressiveFutureConnector(
            self.acq_future, self.gauge_acq, self.lbl_acqestimate)
        self.acq_future.add_done_callback(self.on_acquisition_done)
Example #32
    def _acquire(self, dlg, future):
        # Stop the streams
        dlg.streambar_controller.pauseStreams()

        # Acquire (even if it was live, to be sure the data is up-to-date)
        ss = self._get_acq_streams()
        dur = acq.estimateTime(ss)
        startt = time.time()
        future._cur_f = InstantaneousFuture()
        future.task_canceller = self._acq_canceller
        future.set_running_or_notify_cancel()  # Indicate the work is starting now
        future.set_progress(end=startt + dur)
        dlg.showProgress(future)

        future._cur_f = acq.acquire(ss, self.main_app.main_data.settings_obs)
        das, e = future._cur_f.result()
        if future.cancelled():
            raise CancelledError()

        if e:
            raise e

        return das
Example #33
def acquire_timelapse(num, period, filename):
    """
    num (int or None): if None, will never stop, unless interrupted
    """

    # Find components by their role
    ccd = model.getComponent(role="ccd")
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    light = model.getComponent(role="light")
    light_filter = model.getComponent(role="filter")
    stage = model.getComponent(role="stage")
    focus = model.getComponent(role="focus")

    # Prepare the streams and acquisition manager
    # The settings of the emissions and excitation are based on the current
    # hardware settings.
    stfm = stream.FluoStream("Fluorescence image", ccd, ccd.data, light, light_filter)
    # Force the excitation light using that command:
    # stfm.excitation.value = (4.72e-07, 4.79e-07, 4.85e-07, 4.91e-07, 4.97e-07)
    stem = stream.SEMStream("Secondary electrons", sed, sed.data, ebeam)
    # Special stream that will run the overlay and update the metadata based on this
    # Note: if more complex overlay is needed (eg, with background subtraction,
    # or with saving the CCD image), we'd need to directly call FindOverlay()
    stovl = stream.OverlayStream("Overlay", ccd, ebeam, sed)
    stovl.dwellTime.value = OVERLAY_DT

    acq_streams = [stem, stfm, stovl]
    
    # Prepare to save each acquisition in a separate file
    exporter = dataio.find_fittest_converter(filename)
    basename, ext = os.path.splitext(filename)
    fn_pattern = basename + "%04d" + ext

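    # Also log the stage (X/Y) and focus (Z) positions of every acquisition, as TSV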
    fn_pos = basename + "pos.csv"
    fpos = open(fn_pos, "a")
    fpos.write("time\tX\tY\tZ\n")

    # Run acquisition every period
    try:
        i = 1
        while True:
            logging.info("Acquiring image %d", i)
            start = time.time()

            # Acquire all the images
            f = acq.acquire(acq_streams)
            data, e = f.result()
            if e:
                logging.error("Acquisition failed with %s", e)
                # It can partially fail, so still allow saving the data that was successfully acquired

            # Note: the actual time of the position is the one when the position was read
            # by the pigcs driver.
            spos = stage.position.value
            fpos.write("%.20g\t%g\t%g\t%g\n" %
                       (time.time(), spos["x"], spos["y"], focus.position.value["z"]))

            # Save the file
            if data:
                exporter.export(fn_pattern % (i,), data)

            # TODO: run autofocus from time to time?

            left = period - (time.time() - start)
            if left < 0:
                logging.warning("Acquisition took longer than the period (%g s overdue)", -left)
            else:
                logging.info("Sleeping for another %g s", left)
                time.sleep(left)

            if i == num:  # will never be True if num is None
                break
            i += 1

    except KeyboardInterrupt:
        logging.info("Closing after only %d images acquired", i)
    except Exception:
        logging.exception("Failed to acquire all the images.")
        raise

    fpos.close()
Example #34
    def test_leech(self):
        """
        try acquisition with leech
        """
        # Create the streams and streamTree
        semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
        sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
        ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam)
        semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
        st = stream.StreamTree(streams=[semsur, semars])

        pcd = Fake0DDetector("test")
        pca = ProbeCurrentAcquirer(pcd)
        sems.leeches.append(pca)
        semsur.leeches.append(pca)

        # SEM survey settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # SEM/AR settings are via the AR stream
        ars.roi.value = (0.1, 0.1, 0.8, 0.8)
        mx_brng = self.ccd.binning.range[1]
        binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
        self.ccd.binning.value = binning
        self.ccd.exposureTime.value = 1  # s
        ars.repetition.value = (2, 3)
        num_ar = numpy.prod(ars.repetition.value)

        pca.period.value = 10  # Only at beginning and end

        est_time = acq.estimateTime(st.getProjections())

        # prepare callbacks
        self.start = None
        self.end = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(st.getProjections())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data, e = f.result()
        dur = time.time() - start
        self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
        self.assertIsInstance(data[0], model.DataArray)
        self.assertIsNone(e)
        self.assertEqual(len(data), num_ar + 2)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1)  # at least one update at end
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(not f.cancelled())

        time.sleep(0.1)
        self.assertEqual(self.done, 1)

        for da in data:
            pcmd = da.metadata[model.MD_EBEAM_CURRENT_TIME]
            self.assertEqual(len(pcmd), 2)
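
The final loop checks that each DataArray carries exactly two probe-current readings: with a 10 s period and a short acquisition, the leech only measures at the beginning and at the end. As a sketch of how such metadata could be consumed, assuming each entry is a (timestamp, current) pair, which is what the length check above implies:

def mean_probe_current(da):
    """Average the probe-current readings attached to a DataArray.

    Sketch only: assumes model.MD_EBEAM_CURRENT_TIME holds a list of
    (timestamp, current in A) tuples, as the test above suggests.
    """
    readings = da.metadata[model.MD_EBEAM_CURRENT_TIME]
    return sum(c for _, c in readings) / len(readings)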
Example #36
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = self._tab.streambar_controller
        str_ctrl.pauseStreams()
        dlg.pauseSettings()
        self._unsubscribe_vas()

        orig_pos = main_data.stage.position.value
        trep = (self.nx.value, self.ny.value)
        nb = trep[0] * trep[1]
        # It's not a big deal if it was a bad guess as we'll use the actual data
        # before the first move
        sfov = self._guess_smallest_fov()
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        fn_bs, fn_ext = udataio.splitext(fn)

        ss, stitch_ss = self._get_acq_streams()
        end = self.estimate_time() + time.time()

        ft = model.ProgressiveFuture(end=end)
        self.ft = ft  # allows future to be canceled in show_dlg after closing window
        ft.running_subf = model.InstantaneousFuture()
        ft._task_state = RUNNING
        ft._task_lock = threading.Lock()
        ft.task_canceller = self._cancel_acquisition  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # For stitching only
        da_list = []  # for each position, a list of DataArrays
        i = 0
        prev_idx = [0, 0]
        try:
            for ix, iy in self._generate_scanning_indices(trep):
                logging.debug("Acquiring tile %dx%d", ix, iy)
                self._move_to_tile((ix, iy), orig_pos, sfov, prev_idx)
                prev_idx = ix, iy
                # Update the progress bar
                ft.set_progress(end=self.estimate_time(nb - i) + time.time())

                ft.running_subf = acq.acquire(ss)
                das, e = ft.running_subf.result()  # blocks until all the acquisitions are finished
                if e:
                    logging.warning("Acquisition for tile %dx%d partially failed: %s",
                                    ix, iy, e)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                # TODO: do in a separate thread
                fn_tile = "%s-%.5dx%.5d%s" % (fn_bs, ix, iy, fn_ext)
                logging.debug("Will save data of tile %dx%d to %s", ix, iy, fn_tile)
                exporter.export(fn_tile, das)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                if self.stitch.value:
                    # Sort tiles (largest SEM image in first position)
                    da_list.append(self.sort_das(das, stitch_ss))

                # Check the FoV is correct using the data, and if not update
                if i == 0:
                    sfov = self._check_fov(das, sfov)
                i += 1

            # Move stage to original position
            main_data.stage.moveAbs(orig_pos)

            # Stitch SEM and CL streams
            st_data = []
            if self.stitch.value and (not da_list or not da_list[0]):
                # if only AR or Spectrum are acquired
                logging.warning("No stream acquired that can be used for stitching.")
            elif self.stitch.value:
                logging.info("Acquisition completed, now stitching...")
                ft.set_progress(end=self.estimate_time(0) + time.time())

                logging.info("Computing big image out of %d images", len(da_list))
                das_registered = stitching.register(da_list)

                # Select weaving method
                # On a SPARC system the mean weaver gives the best result, since it
                # smooths the transitions between tiles. However, using this weaver on the
                # SECOM/DELPHI generates an image with dark stripes in the overlap regions,
                # which are the result of carbon decomposition effects that typically occur
                # in samples imaged by these systems. To mitigate this, we use the
                # collage_reverse weaver, which only shows the overlap region of the tile
                # that was imaged first.
                if self.microscope.role in ("secom", "delphi"):
                    weaving_method = WEAVER_COLLAGE_REVERSE
                    logging.info("Using weaving method WEAVER_COLLAGE_REVERSE.")
                else:
                    weaving_method = WEAVER_MEAN
                    logging.info("Using weaving method WEAVER_MEAN.")

                # Weave every stream
                if isinstance(das_registered[0], tuple):
                    for s in range(len(das_registered[0])):
                        streams = []
                        for da in das_registered:
                            streams.append(da[s])
                        da = stitching.weave(streams, weaving_method)
                        da.metadata[model.MD_DIMS] = "YX"  # TODO: do it in the weaver
                        st_data.append(da)
                else:
                    da = stitching.weave(das_registered, weaving_method)
                    st_data.append(da)

                # Save
                exporter = dataio.find_fittest_converter(fn)
                if exporter.CAN_SAVE_PYRAMID:
                    exporter.export(fn, st_data, pyramid=True)
                else:
                    logging.warning("File format doesn't support saving image in pyramidal form")
                    exporter.export(fn, st_data)

            ft.set_result(None)  # Indicate it's over

            # End of the (completed) acquisition
            if ft._task_state == CANCELLED:
                raise CancelledError()
            dlg.Close()

            # Open analysis tab
            if st_data:
                self.showAcquisition(fn)

            # TODO: also export a full image (based on reported position, or based
            # on alignment detection)
        except CancelledError:
            logging.debug("Acquisition cancelled")
            dlg.resumeSettings()
        except Exception as ex:
            logging.exception("Acquisition failed.")
            ft.running_subf.cancel()
            ft.set_result(None)
            # Show also in the window. It will be hidden next time a setting is changed.
            self._dlg.setAcquisitionInfo("Acquisition failed: %s" % (ex,),
                                         lvl=logging.ERROR)
        finally:
            logging.info("Tiled acquisition ended")
            main_data.stage.moveAbs(orig_pos)
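
_generate_scanning_indices() is not shown in this excerpt; one plausible implementation is a serpentine (boustrophedon) scan, which keeps consecutive tiles adjacent and so minimises stage travel. A hypothetical sketch:

def generate_scanning_indices(rep):
    """Yield (ix, iy) tile indices in serpentine order for a rep=(nx, ny) grid.

    Hypothetical sketch of the helper used above: even rows go left to right,
    odd rows right to left, so every move is to a neighbouring tile.
    """
    nx, ny = rep
    for iy in range(ny):
        xs = range(nx) if iy % 2 == 0 else range(nx - 1, -1, -1)
        for ix in xs:
            yield ix, iy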
Example #37
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        self._pause_streams()

        # We use the acquisition CL intensity stream, so there is a concurrent
        # SEM acquisition (just the survey). The drift correction is run both
        # during the acquisition and in-between each acquisition. The drift
        # between each acquisition is corrected by updating the metadata, so
        # it's a kind of post-processing compensation. The advantage is that
        # it doesn't affect the data, and if the entire field of view is imaged,
        # it still works properly. However, when opening the data in other
        # software (eg, ImageJ), that compensation will not be applied
        # automatically.
        # Alternatively, the images could be cropped to just the region which is
        # common to all the acquisitions, but there might then be data loss.
        # Note: the compensation could also be done by updating the ROI of the
        # CL stream. However, in the most common case the user acquires the
        # entire area, so that compensation could not be applied. We could also
        # use the concurrent SEM stream and measure the drift afterwards, but
        # that doubles the dwell time.
        dt_survey, dt_clint, dt_drift = self._calc_acq_times()

        das = []
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Prepare the Future to represent the acquisition progress, and cancel
        dur = self.expectedDuration.value
        end = time.time() + dur
        ft = model.ProgressiveFuture(end=end)

        # Allow to cancel by cancelling also the sub-task
        def canceller(future):
            # To be absolutely correct, there should be a lock. However, in
            # practice, in the worst case the task will just run a little longer
            # before stopping.
            if future._subf:
                logging.debug("Cancelling sub future %s", future._subf)
                return future._subf.cancel()

        ft._subf = None  # sub-future corresponding to the task currently happening
        ft.task_canceller = canceller  # To allow cancelling while it's running

        # Indicate the work is starting now
        ft.set_running_or_notify_cancel()
        dlg.showProgress(ft)

        try:
            # acquisition of SEM survey
            if self._survey_s:
                ft._subf = acq.acquire([self._survey_s],
                                       self.main_app.main_data.settings_obs)
                d, e = ft._subf.result()
                das.extend(d)
                if e:
                    raise e

            if ft.cancelled():
                raise CancelledError()

            dur -= dt_survey
            ft.set_progress(end=time.time() + dur)

            # Extra drift correction between each filter
            dc_roi = self._acqui_tab.driftCorrector.roi.value
            dc_dt = self._acqui_tab.driftCorrector.dwellTime.value

            # drift correction vector
            tot_dc_vect = (0, 0)
            if dc_roi != UNDEFINED_ROI:
                drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                                    dc_roi, dc_dt)
                drift_est.acquire()
                dur -= dt_drift
                ft.set_progress(end=time.time() + dur)
            else:
                drift_est = None

            # Loop over the filters; for now it's fixed to 3, but this could be made flexible
            for fb, co in zip(self._filters, self._colours):
                logging.info("Moving to band %s with component %s", fb.value,
                             self.filterwheel.name)
                ft._subf = self.filterwheel.moveAbs({"band": fb.value})
                ft._subf.result()
                if ft.cancelled():
                    raise CancelledError()
                ft.set_progress(end=time.time() + dur)

                # acquire CL stream
                ft._subf = acq.acquire([self._cl_int_s],
                                       self.main_app.main_data.settings_obs)
                d, e = ft._subf.result()
                if e:
                    raise e
                if ft.cancelled():
                    raise CancelledError()
                dur -= dt_clint
                ft.set_progress(end=time.time() + dur)

                if drift_est:
                    drift_est.acquire()
                    dc_vect = drift_est.estimate()
                    pxs = self.ebeam.pixelSize.value
                    tot_dc_vect = (tot_dc_vect[0] + dc_vect[0] * pxs[0],
                                   tot_dc_vect[1] - dc_vect[1] * pxs[1]
                                   )  # Y is inverted in physical coordinates
                    dur -= dt_drift
                    ft.set_progress(end=time.time() + dur)

                # Convert the CL intensity stream into a "fluo" stream so that it's nicely displayed (in colour) in the viewer
                for da in d:
                    # Update the center position based on drift
                    pos = da.metadata[model.MD_POS]
                    logging.debug("Correcting position for drift by %s m",
                                  tot_dc_vect)
                    pos = tuple(p + dc for p, dc in zip(pos, tot_dc_vect))
                    da.metadata[model.MD_POS] = pos

                    if model.MD_OUT_WL not in da.metadata:
                        # check it's not the SEM concurrent stream
                        continue
                    # Force the colour, which forces it to be a FluoStream when
                    # opening it in the analysis tab, for nice colour merging.
                    da.metadata[model.MD_USER_TINT] = co

                das.extend(d)
                if ft.cancelled():
                    raise CancelledError()

            ft.set_result(None)  # Indicate it's over

        except CancelledError:
            logging.debug("Acquisition cancelled")
            return
        except Exception as ex:
            logging.exception("Failure during RGB CL acquisition")
            ft.set_exception(ex)
            # TODO: show the error in the plugin window
            return

        if ft.cancelled() or not das:
            return

        logging.debug("Will save data to %s", fn)
        exporter.export(fn, das)
        self.showAcquisition(fn)
        dlg.Close()
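
The drift bookkeeping above converts the estimator's pixel-space vector into a physical translation, inverting Y because image coordinates grow downwards while physical coordinates grow upwards. The same arithmetic as a standalone sketch:

def accumulate_drift(tot_dc_vect, dc_vect_px, pxs):
    """Add a drift estimate (in e-beam pixels) to a running physical vector (in m).

    Sketch of the conversion used above: pxs is the e-beam pixel size in m,
    and Y is inverted between image and physical coordinates.
    """
    return (tot_dc_vect[0] + dc_vect_px[0] * pxs[0],
            tot_dc_vect[1] - dc_vect_px[1] * pxs[1])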
Example #38
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = self._tab.streambar_controller
        str_ctrl.pauseStreams()
        dlg.pauseSettings()
        self._unsubscribe_vas()

        orig_pos = main_data.stage.position.value
        trep = (self.nx.value, self.ny.value)
        nb = trep[0] * trep[1]
        # It's not a big deal if it was a bad guess as we'll use the actual data
        # before the first move
        sfov = self._guess_smallest_fov()
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        fn_bs, fn_ext = udataio.splitext(fn)

        ss, stitch_ss = self._get_acq_streams()
        end = self.estimate_time() + time.time()

        ft = model.ProgressiveFuture(end=end)
        self.ft = ft  # allows future to be canceled in show_dlg after closing window
        ft.running_subf = model.InstantaneousFuture()
        ft._task_state = RUNNING
        ft._task_lock = threading.Lock()
        ft.task_canceller = self._cancel_acquisition  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # For stitching only
        da_list = []  # for each position, a list of DataArrays
        i = 0
        prev_idx = [0, 0]
        try:
            for ix, iy in self._generate_scanning_indices(trep):
                logging.debug("Acquiring tile %dx%d", ix, iy)
                self._move_to_tile((ix, iy), orig_pos, sfov, prev_idx)
                prev_idx = ix, iy
                # Update the progress bar
                ft.set_progress(end=self.estimate_time(nb - i) + time.time())

                ft.running_subf = acq.acquire(ss, self.main_app.main_data.settings_obs)
                das, e = ft.running_subf.result()  # blocks until all the acquisitions are finished
                if e:
                    logging.warning("Acquisition for tile %dx%d partially failed: %s",
                                    ix, iy, e)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                # TODO: do in a separate thread
                fn_tile = "%s-%.5dx%.5d%s" % (fn_bs, ix, iy, fn_ext)
                logging.debug("Will save data of tile %dx%d to %s", ix, iy,
                              fn_tile)
                exporter.export(fn_tile, das)

                if ft._task_state == CANCELLED:
                    raise CancelledError()

                if self.stitch.value:
                    # Sort tiles (largest SEM image in first position)
                    da_list.append(self.sort_das(das, stitch_ss))

                # Check the FoV is correct using the data, and if not update
                if i == 0:
                    sfov = self._check_fov(das, sfov)
                i += 1

            # Move stage to original position
            main_data.stage.moveAbs(orig_pos)

            # Stitch SEM and CL streams
            st_data = []
            if self.stitch.value and (not da_list or not da_list[0]):
                # if only AR or Spectrum are acquired
                logging.warning(
                    "No stream acquired that can be used for stitching.")
            elif self.stitch.value:
                logging.info("Acquisition completed, now stitching...")
                ft.set_progress(end=self.estimate_time(0) + time.time())

                logging.info("Computing big image out of %d images",
                             len(da_list))
                das_registered = stitching.register(da_list)

                # Select weaving method
                # On a SPARC system the mean weaver gives the best result, since it
                # smooths the transitions between tiles. However, using this weaver on the
                # SECOM/DELPHI generates an image with dark stripes in the overlap regions,
                # which are the result of carbon decomposition effects that typically occur
                # in samples imaged by these systems. To mitigate this, we use the
                # collage_reverse weaver, which only shows the overlap region of the tile
                # that was imaged first.
                if self.microscope.role in ("secom", "delphi"):
                    weaving_method = WEAVER_COLLAGE_REVERSE
                    logging.info("Using weaving method WEAVER_COLLAGE_REVERSE.")
                else:
                    weaving_method = WEAVER_MEAN
                    logging.info("Using weaving method WEAVER_MEAN.")

                # Weave every stream
                if isinstance(das_registered[0], tuple):
                    for s in range(len(das_registered[0])):
                        streams = []
                        for da in das_registered:
                            streams.append(da[s])
                        da = stitching.weave(streams, weaving_method)
                        da.metadata[model.MD_DIMS] = "YX"  # TODO: do it in the weaver
                        st_data.append(da)
                else:
                    da = stitching.weave(das_registered, weaving_method)
                    st_data.append(da)

                # Save
                exporter = dataio.find_fittest_converter(fn)
                if exporter.CAN_SAVE_PYRAMID:
                    exporter.export(fn, st_data, pyramid=True)
                else:
                    logging.warning("File format doesn't support saving image in pyramidal form")
                    exporter.export(fn, st_data)

            ft.set_result(None)  # Indicate it's over

            # End of the (completed) acquisition
            if ft._task_state == CANCELLED:
                raise CancelledError()
            dlg.Close()

            # Open analysis tab
            if st_data:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display stitched image")
                self.showAcquisition(fn)
            else:
                popup.show_message(self.main_app.main_frame,
                                   "Tiled acquisition complete",
                                   "Will display last tile")
                # It's easier to know the last filename, and it's also the most
                # interesting for the user, as if something went wrong (eg, focus)
                # it's the tile the most likely to show it.
                self.showAcquisition(fn_tile)

            # TODO: also export a full image (based on reported position, or based
            # on alignment detection)
        except CancelledError:
            logging.debug("Acquisition cancelled")
            dlg.resumeSettings()
        except Exception as ex:
            logging.exception("Acquisition failed.")
            ft.running_subf.cancel()
            ft.set_result(None)
            # Show also in the window. It will be hidden next time a setting is changed.
            self._dlg.setAcquisitionInfo("Acquisition failed: %s" % (ex, ),
                                         lvl=logging.ERROR)
        finally:
            logging.info("Tiled acquisition ended")
            main_data.stage.moveAbs(orig_pos)
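
Both tiled-acquisition variants install self._cancel_acquisition as the task_canceller, but the method itself is outside this excerpt. A hypothetical sketch consistent with the _task_state, _task_lock and running_subf fields set up above (FINISHED and CANCELLED being the usual concurrent.futures state constants):

def _cancel_acquisition(self, future):
    """Hypothetical canceller for the tiled-acquisition future above."""
    with future._task_lock:
        if future._task_state == FINISHED:
            return False  # too late, the acquisition already completed
        future._task_state = CANCELLED
    # Stop whichever tile acquisition or stitching sub-task is running
    future.running_subf.cancel()
    return True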
Example #39
    def test_leech(self):
        """
        try acquisition with leech
        """
        # Create the streams and streamTree
        semsur = stream.SEMStream("test sem", self.sed, self.sed.data,
                                  self.ebeam)
        sems = stream.SEMStream("test sem cl", self.sed, self.sed.data,
                                self.ebeam)
        ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data,
                                      self.ebeam)
        semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
        st = stream.StreamTree(streams=[semsur, semars])

        pcd = Fake0DDetector("test")
        pca = ProbeCurrentAcquirer(pcd)
        sems.leeches.append(pca)
        semsur.leeches.append(pca)

        # SEM survey settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # SEM/AR settings are via the AR stream
        ars.roi.value = (0.1, 0.1, 0.8, 0.8)
        mx_brng = self.ccd.binning.range[1]
        binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
        self.ccd.binning.value = binning
        self.ccd.exposureTime.value = 1  # s
        ars.repetition.value = (2, 3)
        num_ar = numpy.prod(ars.repetition.value)

        pca.period.value = 10  # Only at beginning and end

        est_time = acq.estimateTime(st.getProjections())

        # prepare callbacks
        self.start = None
        self.end = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(st.getProjections())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data, e = f.result()
        dur = time.time() - start
        self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
        self.assertIsInstance(data[0], model.DataArray)
        self.assertIsNone(e)
        self.assertEqual(len(data), num_ar + 2)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1)  # at least one update at end
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(not f.cancelled())

        time.sleep(0.1)
        self.assertEqual(self.done, 1)

        for da in data:
            pcmd = da.metadata[model.MD_EBEAM_CURRENT_TIME]
            self.assertEqual(len(pcmd), 2)
Example #40
    def test_sync_path_guess(self):
        """
        try synchronized acquisition using the Optical Path Manager
        """
        # Create the streams and streamTree
        opmngr = path.OpticalPathManager(self.microscope)
        semsur = stream.SEMStream("test sem", self.sed, self.sed.data,
                                  self.ebeam)
        sems = stream.SEMStream("test sem cl", self.sed, self.sed.data,
                                self.ebeam)
        ars = stream.ARSettingsStream("test ar",
                                      self.ccd,
                                      self.ccd.data,
                                      self.ebeam,
                                      opm=opmngr)
        semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
        specs = stream.SpectrumSettingsStream("test spec",
                                              self.spec,
                                              self.spec.data,
                                              self.ebeam,
                                              opm=opmngr)
        sps = stream.SEMSpectrumMDStream("test sem-spec", [sems, specs])
        st = stream.StreamTree(streams=[semsur, semars, sps])

        # SEM survey settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # SEM/AR/SPEC settings are via the AR stream
        ars.roi.value = (0.1, 0.1, 0.8, 0.8)
        specs.roi.value = (0.2, 0.2, 0.7, 0.7)
        mx_brng = self.ccd.binning.range[1]
        binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
        self.ccd.binning.value = binning
        self.ccd.exposureTime.value = 1  # s
        ars.repetition.value = (2, 3)
        specs.repetition.value = (3, 2)
        num_ar = numpy.prod(ars.repetition.value)

        est_time = acq.estimateTime(st.getProjections())

        # prepare callbacks
        self.start = None
        self.end = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(st.getProjections())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data, e = f.result()
        dur = time.time() - start
        self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
        self.assertIsInstance(data[0], model.DataArray)
        self.assertIsNone(e)
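        # the +4 presumably covers the SEM survey, the two concurrent SEM
        # images (one for the AR scan, one for the spectrum scan), and the
        # spectrum cube itself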
        self.assertEqual(len(data), num_ar + 4)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1)  # at least one update at end
        self.assertLessEqual(self.end, time.time())
        self.assertTrue(not f.cancelled())

        # assert optical path configuration
        exp_pos = path.SPARC_MODES["spectral"][1]
        self.assertEqual(self.lenswitch.position.value, exp_pos["lens-switch"])
        self.assertEqual(self.spec_det_sel.position.value,
                         exp_pos["spec-det-selector"])
        self.assertEqual(self.ar_spec_sel.position.value,
                         exp_pos["ar-spec-selector"])

        time.sleep(0.1)
        self.assertEqual(self.done, 1)
Example #41
    def acquire(self, dlg):
        """
        Acquisition operation.
        """
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberofAcquisitions.value
        ss = self._get_acq_streams()
        last_ss = []  # streams to append only to the final acquisition (empty by default)

        sacqt = acq.estimateTime(ss)
        
        completed = False

        try:
            step_time = self.initAcquisition()
            logging.debug("Acquisition streams: %s", ss)

            # TODO: if drift correction, use it over all the time
            f = model.ProgressiveFuture()
            f.task_canceller = lambda l: True  # To allow cancelling while it's running
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)

            # list of list of DataArray: for each stream, for each acquisition, the data acquired
            images = None
        
            for i in range(nb):
                left = nb - i
                dur = sacqt * left + step_time * (left - 1)

                logging.debug("Acquisition %d of %d", i, nb)

                if left == 1 and last_ss:
                    ss += last_ss
                    dur += acq.estimateTime(ss) - sacqt

                startt = time.time()
                f.set_progress(end=startt + dur)
                das, e = acq.acquire(ss).result()
                if images is None:
                    # Initialise one list of images per stream
                    images = [[] for i in range(len(das))]

                for im, da in zip(images, das):
                    im.append(da)

                if f.cancelled():
                    raise CancelledError()

                # Execute an action to prepare the next acquisition for the ith acquisition
                self.stepAcquisition(i, images)

            f.set_result(None)  # Indicate it's over
            
            # Construct a cube from each stream's image.
            images = self.postProcessing(images)

            # Export image
            exporter = dataio.find_fittest_converter(self.filename.value)
            exporter.export(self.filename.value, images)
            completed = True
            dlg.Close()
            
        except CancelledError:
            logging.debug("Acquisition cancelled.")
            dlg.resumeSettings()

        except Exception:
            logging.exception("Acquisition failed.")

        finally:
            # Do completion actions
            self.completeAcquisition(completed)
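
postProcessing() is expected to turn each stream's list of 2-D images into a cube. A minimal sketch with numpy, reusing the first acquisition's metadata and assuming all acquisitions of a stream share the same shape (build_cubes is an illustrative name, not the plugin's actual helper):

import numpy

def build_cubes(images):
    """Stack each stream's acquisitions into a single cube.

    Sketch only: images is, as in the loop above, a list (one per stream)
    of lists of DataArrays, all of the same shape.
    """
    cubes = []
    for im_list in images:
        cube = numpy.stack(im_list)  # shape: (nb acquisitions, Y, X)
        cubes.append(model.DataArray(cube, im_list[0].metadata.copy()))
    return cubes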
Example #42
    def acquire(self, dlg):
        """
        Acquisition operation.
        """
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberofAcquisitions.value
        ss = self._get_acq_streams()

        sacqt = acq.estimateTime(ss)

        completed = False

        try:
            step_time = self.initAcquisition()
            logging.debug("Acquisition streams: %s", ss)

            # TODO: if drift correction, use it over all the time
            f = model.ProgressiveFuture()
            f.task_canceller = lambda l: True  # To allow cancelling while it's running
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)

            # list of list of DataArray: for each stream, for each acquisition, the data acquired
            images = None

            for i in range(nb):
                left = nb - i
                dur = sacqt * left + step_time * (left - 1)

                logging.debug("Acquisition %d of %d", i, nb)

                startt = time.time()
                f.set_progress(end=startt + dur)
                das, e = acq.acquire(
                    ss, self.main_app.main_data.settings_obs).result()
                if images is None:
                    # Initialise one list of images per stream
                    images = [[] for i in range(len(das))]

                for im, da in zip(images, das):
                    im.append(da)

                if f.cancelled():
                    raise CancelledError()

                # Execute an action to prepare the next acquisition for the ith acquisition
                self.stepAcquisition(i, images)

            f.set_result(None)  # Indicate it's over

            # Construct a cube from each stream's image.
            images = self.postProcessing(images)

            # Export image
            exporter = dataio.find_fittest_converter(self.filename.value)
            exporter.export(self.filename.value, images)
            completed = True
            dlg.Close()

        except CancelledError:
            logging.debug("Acquisition cancelled.")
            dlg.resumeSettings()

        except Exception:
            logging.exception("Acquisition failed.")

        finally:
            # Do completion actions
            self.completeAcquisition(completed)
Example #43
    def test_sync_path_guess(self):
        """
        try synchronized acquisition using the Optical Path Manager
        """
        # Create the streams and streamTree
        opmngr = path.OpticalPathManager(self.microscope)
        semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
        sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
        ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam, opm=opmngr)
        semars = stream.SEMARMDStream("test SEM/AR", sems, ars)
        specs = stream.SpectrumSettingsStream("test spec", self.spec, self.spec.data, self.ebeam, opm=opmngr)
        sps = stream.SEMSpectrumMDStream("test sem-spec", sems, specs)
        st = stream.StreamTree(streams=[semsur, semars, sps])

        # SEM survey settings are via the current hardware settings
        self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

        # SEM/AR/SPEC settings are via the AR stream
        ars.roi.value = (0.1, 0.1, 0.8, 0.8)
        specs.roi.value = (0.2, 0.2, 0.7, 0.7)
        mx_brng = self.ccd.binning.range[1]
        binning = tuple(min(4, mx) for mx in mx_brng) # try binning 4x4
        self.ccd.binning.value = binning
        self.ccd.exposureTime.value = 1 # s
        ars.repetition.value = (2, 3)
        specs.repetition.value = (3, 2)
        num_ar = numpy.prod(ars.repetition.value)

        est_time = acq.estimateTime(st.getStreams())

        # prepare callbacks
        self.start = None
        self.end = None
        self.updates = 0
        self.done = 0

        # Run acquisition
        start = time.time()
        f = acq.acquire(st.getStreams())
        f.add_update_callback(self.on_progress_update)
        f.add_done_callback(self.on_done)

        data, e = f.result()
        dur = time.time() - start
        self.assertGreaterEqual(dur, est_time / 2) # Estimated time shouldn't be too small
        self.assertIsInstance(data[0], model.DataArray)
        self.assertIsNone(e)
        self.assertEqual(len(data), num_ar + 4)

        thumb = acq.computeThumbnail(st, f)
        self.assertIsInstance(thumb, model.DataArray)

        self.assertGreaterEqual(self.updates, 1) # at least one update at end
        self.assertLessEqual(self.end, time.time())
        self.assertEqual(self.done, 1)
        self.assertTrue(not f.cancelled())

        # assert optical path configuration
        exp_pos = path.SPARC_MODES["spectral"][1]
        self.assertEqual(self.lenswitch.position.value, exp_pos["lens-switch"])
        self.assertEqual(self.spec_det_sel.position.value, exp_pos["spec-det-selector"])
        self.assertEqual(self.ar_spec_sel.position.value, exp_pos["ar-spec-selector"])
Example #44
def acquire_spec(wls, wle, res, dt, filename):
    """
    wls (float): start wavelength in m
    wle (float): end wavelength in m
    res (int): number of points to acquire
    dt (float): dwell time in seconds
    filename (str): filename to save to
    """
    # TODO: take a progressive future to update and know if it's the end

    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    mchr = model.getComponent(role="monochromator")
    try:
        sgrh = model.getComponent(role="spectrograph")
    except LookupError:
        sgrh = model.getComponent(role="spectrograph-dedicated")
    opm = acq.path.OpticalPathManager(model.getMicroscope())

    prev_dt = ebeam.dwellTime.value
    prev_res = ebeam.resolution.value
    prev_scale = ebeam.scale.value
    prev_trans = ebeam.translation.value
    prev_wl = sgrh.position.value["wavelength"]

    # Create a stream for monochromator scan
    mchr_s = MonochromatorScanStream("Spectrum", mchr, ebeam, sgrh, opm=opm)
    mchr_s.startWavelength.value = wls
    mchr_s.endWavelength.value = wle
    mchr_s.numberOfPixels.value = res
    mchr_s.dwellTime.value = dt
    mchr_s.emtTranslation.value = ebeam.translation.value

    # Create SEM survey stream
    survey_s = stream.SEMStream(
        "Secondary electrons survey",
        sed,
        sed.data,
        ebeam,
        emtvas={"translation", "scale", "resolution", "dwellTime"},
    )
    # max FoV, with scale 4
    survey_s.emtTranslation.value = (0, 0)
    survey_s.emtScale.value = (4, 4)
    survey_s.emtResolution.value = tuple(v // 4 for v in ebeam.resolution.range[1])
    survey_s.emtDwellTime.value = 10e-6  # 10µs is hopefully enough

    # Acquire using the acquisition manager
    # Note: the monochromator scan stream is unknown to the acquisition manager,
    # so it'll be done last
    expt = acq.estimateTime([survey_s, mchr_s])
    f = acq.acquire([survey_s, mchr_s])

    try:
        # Note: the timeout is important, as it allows to catch KeyboardInterrupt
        das, e = f.result(2 * expt + 1)
    except KeyboardInterrupt:
        logging.info("Stopping before end of acquisition")
        f.cancel()
        return
    finally:
        logging.debug("Restoring hardware settings")
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt
        sgrh.moveAbs({"wavelength": prev_wl})
        ebeam.scale.value = prev_scale
        ebeam.translation.value = prev_trans
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt

    if e:
        logging.error("Acquisition failed: %s", e)

    if das:
        # Save the file
        exporter = dataio.find_fittest_converter(filename)
        exporter.export(filename, das)
        logging.info("Spectrum successfully saved to %s", filename)
        input("Press Enter to close.")
Example #45
def acquire_spec(wls, wle, res, dt, filename):
    """
    wls (float): start wavelength in m
    wle (float): end wavelength in m
    res (int): number of points to acquire
    dt (float): dwell time in seconds
    filename (str): filename to save to
    """
    # TODO: take a progressive future to update and know if it's the end

    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    mchr = model.getComponent(role="monochromator")
    sgrh = model.getComponent(role="spectrograph")

    prev_dt = ebeam.dwellTime.value
    prev_res = ebeam.resolution.value
    prev_scale = ebeam.scale.value
    prev_trans = ebeam.translation.value
    prev_wl = sgrh.position.value["wavelength"]

    # Create a stream for monochromator scan
    mchr_s = MonochromatorScanStream("Spectrum", mchr, ebeam, sgrh)
    mchr_s.startWavelength.value = wls
    mchr_s.endWavelength.value = wle
    mchr_s.numberOfPixels.value = res
    mchr_s.dwellTime.value = dt
    mchr_s.emtTranslation.value = ebeam.translation.value

    # Create SEM survey stream
    survey_s = stream.SEMStream("Secondary electrons survey",
                                sed, sed.data, ebeam,
        emtvas={"translation", "scale", "resolution", "dwellTime"},
    )
    # max FoV, with scale 4
    survey_s.emtTranslation.value = (0, 0)
    survey_s.emtScale.value = (4, 4)
    survey_s.emtResolution.value = tuple(v // 4 for v in ebeam.resolution.range[1])
    survey_s.emtDwellTime.value = 10e-6  # 10µs is hopefully enough

    # Acquire using the acquisition manager
    # Note: the monochromator scan stream is unknown to the acquisition manager,
    # so it'll be done last
    expt = acq.estimateTime([survey_s, mchr_s])
    f = acq.acquire([survey_s, mchr_s])

    try:
        # Note: the timeout is important, as it allows to catch KeyboardInterrupt
        das, e = f.result(2 * expt + 1)
    except KeyboardInterrupt:
        logging.info("Stopping before end of acquisition")
        f.cancel()
        return
    finally:
        logging.debug("Restoring hardware settings")
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt
        sgrh.moveAbs({"wavelength": prev_wl})
        ebeam.scale.value = prev_scale
        ebeam.translation.value = prev_trans
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt

    if e:
        logging.error("Acquisition failed: %s", e)

    if das:
        # Save the file
        exporter = dataio.find_fittest_converter(filename)
        exporter.export(filename, das)
        logging.info("Spectrum successfully saved to %s", filename)
        input("Press Enter to close.")
Example #46
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        self._pause_streams()

        # We use the acquisition CL intensity stream, so there is a concurrent
        # SEM acquisition (just the survey). The drift correction is run both
        # during the acquisition and in-between each acquisition. The drift
        # between each acquisition is corrected by updating the metadata, so
        # it's a kind of post-processing compensation. The advantage is that
        # it doesn't affect the data, and if the entire field of view is imaged,
        # it still works properly. However, when opening the data in other
        # software (eg, ImageJ), that compensation will not be applied
        # automatically.
        # Alternatively, the images could be cropped to just the region which is
        # common to all the acquisitions, but there might then be data loss.
        # Note: the compensation could also be done by updating the ROI of the
        # CL stream. However, in the most common case the user acquires the
        # entire area, so that compensation could not be applied. We could also
        # use the concurrent SEM stream and measure the drift afterwards, but
        # that doubles the dwell time.
        dt_survey, dt_clint, dt_drift = self._calc_acq_times()


        das = []
        e = None  # error of the last sub-acquisition, if any
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        dur = self.expectedDuration.value
        end = time.time() + dur
        ft = model.ProgressiveFuture(end=end)
        ft.task_canceller = lambda l: True  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # TODO: if the user cancels the acquisition, the GUI makes it look as if
        # it stopped immediately, but the code only detects the cancellation at
        # the end of the CL acquisition. => Need to listen to the future, and
        # immediately cancel the current acquisition.
        try:
            # acquisition of SEM survey
            if self._survey_s:
                d, e = acq.acquire([self._survey_s]).result()
                das.extend(d)
            dur -= dt_survey
            ft.set_progress(end=time.time() + dur)

            # Extra drift correction between each filter
            dc_roi = self._acqui_tab.driftCorrector.roi.value
            dc_dt = self._acqui_tab.driftCorrector.dwellTime.value

            # drift correction vector
            tot_dc_vect = (0, 0)
            if dc_roi != UNDEFINED_ROI:
                drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                                    dc_roi, dc_dt)
                drift_est.acquire()
                dur -= dt_drift
                ft.set_progress(end=time.time() + dur)
            else:
                drift_est = None

            # Loop over the filters; for now it's fixed to 3, but this could be made flexible
            for fb, co in zip(self._filters, self._colours):
                logging.info("Moving to band %s with component %s", fb.value, self.filterwheel.name)
                self.filterwheel.moveAbs({"band": fb.value}).result()
                ft.set_progress(end=time.time() + dur)

                # acquire CL stream
                d, e = acq.acquire([self._cl_int_s]).result()
                if ft.cancelled():
                    return
                dur -= dt_clint
                ft.set_progress(end=time.time() + dur)

                if drift_est:
                    drift_est.acquire()
                    dc_vect = drift_est.estimate()
                    pxs = self.ebeam.pixelSize.value
                    tot_dc_vect = (tot_dc_vect[0] + dc_vect[0] * pxs[0],
                                   tot_dc_vect[1] - dc_vect[1] * pxs[1])  # Y is inverted in physical coordinates
                    dur -= dt_drift
                    ft.set_progress(end=time.time() + dur)

                # Convert the CL intensity stream into a "fluo" stream so that it's nicely displayed (in colour) in the viewer
                for da in d:
                    # Update the center position based on drift
                    pos = da.metadata[model.MD_POS]
                    logging.debug("Correcting position for drift by %s m", tot_dc_vect)
                    pos = tuple(p + dc for p, dc in zip(pos, tot_dc_vect))
                    da.metadata[model.MD_POS] = pos

                    if model.MD_OUT_WL not in da.metadata:
                        # check it's not the SEM concurrent stream
                        continue
                    # Force the colour, which forces it to be a FluoStream when
                    # opening it in the analysis tab, for nice colour merging.
                    da.metadata[model.MD_USER_TINT] = co

                das.extend(d)

            ft.set_result(None)  # Indicate it's over

        except CancelledError:
            pass
        finally:
            # Make sure we don't hold reference to the streams forever
            self._survey_s = None
            self._cl_int_s = None

        if not ft.cancelled() and das:
            if e:
                logging.warning("RGB CL intensity acquisition partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            # logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)
            self.showAcquisition(fn)

        dlg.Close()
Example #47
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        self._pause_streams()

        # We use the acquisition CL intensity stream, so there is a concurrent
        # SEM acquisition (just the survey). The drift correction is run both
        # during the acquisition and in-between each acquisition. The drift
        # between each acquisition is corrected by updating the metadata, so
        # it's a kind of post-processing compensation. The advantage is that
        # it doesn't affect the data. However, when opening the data in other
        # software (eg, ImageJ), that compensation will not be applied
        # automatically.
        # Alternatively, the images could be cropped to just the region which is
        # common to all the acquisitions, but there might then be data loss.
        # Note: the compensation could also be done by updating the ROI of the
        # CL stream. However, in the most common case the user acquires the
        # entire area, so that compensation could not be applied. We could also
        # use the concurrent SEM stream and measure the drift afterwards, but
        # that doubles the dwell time.
        dt_survey, dt_clint, dt_drift = self._calc_acq_times()


        das = []
        e = None  # error of the last sub-acquisition, if any
        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        dur = self.expectedDuration.value
        end = time.time() + dur
        ft = model.ProgressiveFuture(end=end)
        ft.task_canceller = lambda l: True  # To allow cancelling while it's running
        ft.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(ft)

        # TODO: if the user cancels the acquisition, the GUI makes it look as if
        # it stopped immediately, but the code only detects the cancellation at
        # the end of the CL acquisition. => Need to listen to the future, and
        # immediately cancel the current acquisition.
        try:
            # acquisition of SEM survey
            if self._survey_s:
                d, e = acq.acquire([self._survey_s]).result()
                das.extend(d)
            dur -= dt_survey
            ft.set_progress(end=time.time() + dur)

            # Extra drift correction between each filter
            dc_roi = self._acqui_tab.driftCorrector.roi.value
            dc_dt = self._acqui_tab.driftCorrector.dwellTime.value

            # drift correction vector
            tot_dc_vect = (0, 0)
            if dc_roi != UNDEFINED_ROI:
                drift_est = drift.AnchoredEstimator(self.ebeam, self.sed,
                                                    dc_roi, dc_dt)
                drift_est.acquire()
                dur -= dt_drift
                ft.set_progress(end=time.time() + dur)
            else:
                drift_est = None

            # Loop over the filters; for now it's fixed to 3, but this could be made flexible
            for fb, co in zip(self._filters, self._colours):
                logging.info("Moving to band %s with component %s", fb.value, self.filterwheel.name)
                self.filterwheel.moveAbs({"band": fb.value}).result()
                ft.set_progress(end=time.time() + dur)

                # acquire CL stream
                d, e = acq.acquire([self._cl_int_s]).result()
                if ft.cancelled():
                    return
                dur -= dt_clint
                ft.set_progress(end=time.time() + dur)

                if drift_est:
                    drift_est.acquire()
                    dc_vect = drift_est.estimate()
                    pxs = self.ebeam.pixelSize.value
                    tot_dc_vect = (tot_dc_vect[0] + dc_vect[0] * pxs[0],
                                   tot_dc_vect[1] - dc_vect[1] * pxs[1])  # Y is inverted in physical coordinates
                    dur -= dt_drift
                    ft.set_progress(end=time.time() + dur)

                # Convert the CL intensity stream into a "fluo" stream so that it's nicely displayed (in colour) in the viewer
                for da in d:
                    # Update the center position based on drift
                    pos = da.metadata[model.MD_POS]
                    logging.debug("Correcting position for drift by %s m", tot_dc_vect)
                    pos = tuple(p + dc for p, dc in zip(pos, tot_dc_vect))
                    da.metadata[model.MD_POS] = pos

                    if model.MD_OUT_WL not in da.metadata:
                        # check it's not the SEM concurrent stream
                        continue
                    # Force the colour, which forces it to be a FluoStream when
                    # opening it in the analysis tab, for nice colour merging.
                    da.metadata[model.MD_USER_TINT] = co

                das.extend(d)

            ft.set_result(None)  # Indicate it's over

        except CancelledError:
            pass
        finally:
            # Make sure we don't hold reference to the streams forever
            self._survey_s = None
            self._cl_int_s = None

        if not ft.cancelled() and das:
            if e:
                logging.warning("RGB CL intensity acquisition partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            # logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)
            self.showAcquisition(fn)

        dlg.Close()
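
The TODO in the last two variants (cancellation only being detected at the end of each CL acquisition) is what the canceller in Example #37 addresses: keep a reference to the running sub-future on the ProgressiveFuture and forward cancel() to it. The pattern in isolation, as a short sketch:

def canceller(future):
    # Forward the cancellation to whichever sub-acquisition is running;
    # in the worst case the task runs a little longer before stopping.
    if future._subf:
        return future._subf.cancel()
    return True

ft = model.ProgressiveFuture()
ft._subf = None  # sub-future of the task currently happening
ft.task_canceller = canceller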