def acquire(self, dlg):
    """
    Run the timelapse acquisition: acquire the streams `numberOfAcquisitions`
    times, spacing the *starts* of consecutive acquisitions by `period`
    seconds, and export each acquisition to its own numbered file.
    dlg: the acquisition dialog, used to show progress and pause/resume settings
    """
    main_data = self.main_app.main_data
    str_ctrl = main_data.tab.value.streambar_controller
    # Pause the live streams so they don't interfere with the acquisition
    stream_paused = str_ctrl.pauseStreams()
    dlg.pauseSettings()

    nb = self.numberOfAcquisitions.value
    p = self.period.value  # s, requested time between the starts of two acquisitions
    ss, last_ss = self._get_acq_streams()

    fn = self.filename.value
    exporter = dataio.find_fittest_converter(fn)
    bs, ext = splitext(fn)
    fn_pat = bs + "-%.5d" + ext  # one file per acquisition, numbered 00000, 00001, ...

    sacqt = acq.estimateTime(ss)  # s, estimated duration of one acquisition
    intp = max(0, p - sacqt)  # s, idle time between two acquisitions
    if p < sacqt:
        logging.warning(
            "Acquisition will take %g s, but period between acquisition must be only %g s",
            sacqt, p
        )

    # TODO: if drift correction, use it over all the time
    f = model.ProgressiveFuture()
    f.task_canceller = lambda l: True  # To allow cancelling while it's running
    f.set_running_or_notify_cancel()  # Indicate the work is starting now
    dlg.showProgress(f)

    for i in range(nb):
        left = nb - i
        # Remaining duration: the remaining acquisitions plus the idle gaps between them
        dur = sacqt * left + intp * (left - 1)
        if left == 1 and last_ss:
            # On the very last iteration, also acquire the extra "last" streams
            ss += last_ss
            dur += acq.estimateTime(ss) - sacqt
        startt = time.time()
        f.set_progress(end=startt + dur)
        # NOTE(review): the acquisition error `e` is discarded here — confirm
        # failures are reported elsewhere
        das, e = acq.acquire(ss).result()
        if f.cancelled():
            dlg.resumeSettings()
            return
        exporter.export(fn_pat % (i,), das)
        # Wait the period requested, excepted the last time
        if left > 1:
            sleept = (startt + p) - time.time()
            if sleept > 0:
                time.sleep(sleept)
            else:
                logging.info("Immediately starting next acquisition, %g s late", -sleept)

    f.set_result(None)  # Indicate it's over
    # self.showAcquisition(self.filename.value)
    dlg.Close()
def _fast_acquire_one(self, dlg, st, last_ss): """ Acquires one stream, *as fast as possible* (ie, the period is not used). Only works with LiveStreams (and not with MDStreams) st (LiveStream) last_ss (list of Streams): all the streams to be acquire on the last time """ # Essentially, we trick a little bit the stream, by convincing it that # we want a live view, but instead of display the data, we store them. # It's much faster because we don't have to stop/start the detector between # each acquisition. nb = self.numberOfAcquisitions.value fn = self.filename.value self._exporter = dataio.find_fittest_converter(fn) bs, ext = splitext(fn) fn_pat = bs + "-%.5d" + ext self._acq_completed = threading.Event() f = model.ProgressiveFuture() f.task_canceller = self._cancel_fast_acquire f._stream = st if last_ss: nb -= 1 extra_dur = acq.estimateTime([st] + last_ss) else: extra_dur = 0 self._hijack_live_stream(st, f, nb, fn_pat, extra_dur) try: # Start acquisition and wait until it's done f.set_running_or_notify_cancel( ) # Indicate the work is starting now dlg.showProgress(f) st.is_active.value = True self._acq_completed.wait() if f.cancelled(): dlg.resumeSettings() return finally: st.is_active.value = False # just to be extra sure it's stopped logging.debug("Restoring stream %s", st) self._restore_live_stream(st) # last "normal" acquisition, if needed if last_ss: logging.debug("Acquiring last acquisition, with all the streams") ss = [st] + last_ss f.set_progress(end=time.time() + acq.estimateTime(ss)) das, e = acq.acquire( ss, self.main_app.main_data.settings_obs).result() self._save_data(fn_pat % (nb, ), das) self._stop_saving_threads() # Wait for all the data to be stored f.set_result(None) # Indicate it's over
def acquire(self, dlg):
    """
    Start the timelapse: pick the fast single-stream path when possible,
    otherwise fall back to the generic multi-stream acquisition.
    dlg: the acquisition dialog (progress display, settings pausing)
    """
    tab_data = self.main_app.main_data
    bar_ctrl = tab_data.tab.value.streambar_controller
    paused = bar_ctrl.pauseStreams()  # live view must not run during acquisition
    dlg.pauseSettings()
    self._start_saving_threads(4)

    streams, last_streams = self._get_acq_streams()
    one_dur = acq.estimateTime(streams)
    period = self.period.value
    count = self.numberOfAcquisitions.value

    # The optimised path only applies to a single live stream, acquired
    # several times, quickly, and with no real idle time between frames.
    use_fast = (len(streams) == 1
                and isinstance(streams[0], LiveStream)
                and count >= 2
                and one_dur < 5
                and period < one_dur + Stream.SETUP_OVERHEAD)
    try:
        if use_fast:
            logging.info("Fast timelapse detected, will acquire as fast as possible")
            self._fast_acquire_one(dlg, streams[0], last_streams)
        else:
            self._acquire_multi(dlg, streams, last_streams)
    finally:
        # Make sure the threads are stopped even in case of error
        self._stop_saving_threads()

    # self.showAcquisition(self.filename.value)
    logging.debug("Closing dialog")
    dlg.Close()
def test_acq_fine_align(self): """ try acquisition with SEM + Optical + overlay streams """ # Create the streams sems = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam) # SEM settings are via the current hardware settings self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0] fs1 = stream.FluoStream("test orange", self.ccd, self.ccd.data, self.light, self.light_filter) fs1.excitation.value = fs1.excitation.range[0] + 5e-9 fs1.emission.value = fs1.emission.range[0] + 5e-9 fs2 = stream.FluoStream("test blue", self.ccd, self.ccd.data, self.light, self.light_filter) fs2.excitation.value = fs2.excitation.range[1] - 5e-9 fs2.emission.value = fs2.emission.range[1] - 5e-9 self.ccd.exposureTime.value = 0.1 # s ovrl = stream.OverlayStream("overlay", self.ccd, self.ebeam, self.sed) ovrl.dwellTime.value = 0.3 ovrl.repetition.value = (7, 7) streams = [sems, fs1, fs2, ovrl] est_time = acq.estimateTime(streams) sum_est_time = sum(s.estimateAcquisitionTime() for s in streams) self.assertGreaterEqual(est_time, sum_est_time) # prepare callbacks self.past = None self.left = None self.updates = 0 self.done = 0 # Run acquisition start = time.time() f = acq.acquire(streams) f.add_update_callback(self.on_progress_update) f.add_done_callback(self.on_done) data = f.result() dur = time.time() - start self.assertGreater(dur, est_time / 2) # Estimated time shouldn't be too small self.assertIsInstance(data[0], model.DataArray) self.assertEqual(len(data), len(streams) - 1) # No overlay correction metadata anywhere (it has all been merged) for d in data: for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]: self.assertNotIn(k, d.metadata) # thumb = acq.computeThumbnail(st, f) # self.assertIsInstance(thumb, model.DataArray) self.assertGreaterEqual(self.updates, 1) # at least one update at end self.assertEqual(self.left, 0) self.assertEqual(self.done, 1) self.assertTrue(not f.cancelled())
def update_acquisition_time(self):
    """Refresh the time-estimate/status label and the matching status icon."""
    # A running ellipsis animator keeps rewriting the label: stop it first.
    if self._ellipsis_animator:
        self._ellipsis_animator.cancel()
        self._ellipsis_animator = None

    # While acquiring, the label shows the time-to-completion instead
    # (shared label), so leave it untouched.
    if self._main_data_model.is_acquiring.value:
        return

    lvl = None  # logging level driving the status icon (None = no icon)
    if self._main_data_model.is_preparing.value:
        txt = u"Optical path is being reconfigured…"
        self._ellipsis_animator = EllipsisAnimator(txt, self.lbl_acqestimate)
        self._ellipsis_animator.start()
        lvl = logging.INFO
    elif self._roa.value == UNDEFINED_ROI:
        # TODO: update the default text to be the same
        txt = u"Region of acquisition needs to be selected"
        lvl = logging.WARN
    else:
        streams = self._tab_data_model.acquisitionStreams
        # Round up: a slightly pessimistic estimate beats an optimistic one
        secs = math.ceil(acq.estimateTime(streams))
        txt = u"Estimated time is {}.".format(units.readable_time(secs))

    logging.debug("Updating status message %s, with level %s", txt, lvl)
    self.lbl_acqestimate.SetLabel(txt)
    self._show_status_icons(lvl)
def estimate_time(self, remaining=None):
    """
    Estimate the total duration of the tiled acquisition, including stage
    moves and (optionally) stitching.
    remaining (None or int): tiles still to acquire; defaults to the full
      nx * ny grid.
    return (float): estimated duration in s
    """
    ss, stitch_ss = self._get_acq_streams()
    if remaining is None:
        remaining = self.nx.value * self.ny.value
    acqt = acq.estimateTime(ss)

    if self.stitch.value:
        # Stitching cost scales with the pixel count of the overlap region,
        # so base it on the largest raw image among the streams to stitch.
        all_pxs = [sda.shape[0] * sda.shape[1] for s in stitch_ss for sda in s.raw]
        max_pxs = max(all_pxs) if all_pxs else 0
        stitcht = self.nx.value * self.ny.value * max_pxs * self.overlap.value * self.STITCH_SPEED
    else:
        stitcht = 0

    try:
        # Current tile is part of remaining, so one move fewer is needed
        movet = max(self._guess_smallest_fov()) * self.MOVE_SPEED * (remaining - 1)
    except ValueError:  # no current streams
        movet = 0.5

    return acqt * remaining + movet + stitcht
def estimate_time(self, remaining=None):
    """
    Estimates duration for acquisition and stitching.
    remaining (None or int): number of tiles left to acquire (the whole
      nx x ny grid when None).
    return (float): estimated time in s
    """
    streams, stitch_streams = self._get_acq_streams()
    if remaining is None:
        remaining = self.nx.value * self.ny.value
    single_acq = acq.estimateTime(streams)

    stitch_dur = 0
    if self.stitch.value:
        # Stitching time is driven by the pixel count of the biggest raw
        # image taking part in the stitching.
        biggest = 0
        for s in stitch_streams:
            for da in s.raw:
                biggest = max(biggest, da.shape[0] * da.shape[1])
        stitch_dur = (self.nx.value * self.ny.value * biggest *
                      self.overlap.value * self.STITCH_SPEED)

    try:
        # The current tile counts as "remaining", hence one move fewer
        move_dur = max(self._guess_smallest_fov()) * self.MOVE_SPEED * (remaining - 1)
    except ValueError:  # no current streams
        move_dur = 0.5

    return single_acq * remaining + move_dur + stitch_dur
def _update_exp_dur(self, _=None):
    """
    Recompute expectedDuration; called whenever a VA that affects the
    expected duration changes.
    """
    count = self.numberOfAcquisitions.value
    period = self.period.value
    ss, last_ss = self._get_acq_streams()
    one_acq = acq.estimateTime(ss)
    logging.debug("Estimating %g s acquisition for %d streams", one_acq, len(ss))

    # Idle time between two acquisitions (0 when one acquisition is already
    # longer than the requested period)
    idle = max(0, period - one_acq)
    total = one_acq * count + idle * (count - 1)
    if last_ss:
        # The final acquisition includes the extra streams, so it lasts longer
        total += acq.estimateTime(ss + last_ss) - one_acq

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(total), force_write=True)
def test_sync_sem_ccd(self): """ try acquisition with fairly complex SEM/CCD stream """ # Create the streams and streamTree semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam) sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam) ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam) semars = stream.SEMARMDStream("test SEM/AR", [sems, ars]) st = stream.StreamTree(streams=[semsur, semars]) # SEM survey settings are via the current hardware settings self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0] # SEM/AR settings are via the AR stream ars.roi.value = (0.1, 0.1, 0.8, 0.8) mx_brng = self.ccd.binning.range[1] binning = tuple(min(4, mx) for mx in mx_brng) # try binning 4x4 self.ccd.binning.value = binning self.ccd.exposureTime.value = 1 # s ars.repetition.value = (2, 3) num_ar = numpy.prod(ars.repetition.value) est_time = acq.estimateTime(st.getProjections()) # prepare callbacks self.start = None self.end = None self.updates = 0 self.done = 0 # Run acquisition start = time.time() f = acq.acquire(st.getProjections()) f.add_update_callback(self.on_progress_update) f.add_done_callback(self.on_done) data, e = f.result() dur = time.time() - start self.assertGreaterEqual(dur, est_time / 2) # Estimated time shouldn't be too small self.assertIsInstance(data[0], model.DataArray) self.assertIsNone(e) self.assertEqual(len(data), num_ar + 2) thumb = acq.computeThumbnail(st, f) self.assertIsInstance(thumb, model.DataArray) self.assertGreaterEqual(self.updates, 1) # at least one update at end self.assertLessEqual(self.end, time.time()) self.assertTrue(not f.cancelled()) time.sleep(0.1) self.assertEqual(self.done, 1)
def update_acquisition_time(self):
    """Update the label showing the estimated acquisition time."""
    streams = self.get_acq_streams()
    if not streams:
        self.lbl_acqestimate.SetLabel("No streams present.")
        return
    # Round up, to be slightly pessimistic
    secs = math.ceil(acq.estimateTime(streams))
    msg = "The estimated acquisition time is {}.".format(units.readable_time(secs))
    self.lbl_acqestimate.SetLabel(msg)
def _update_exp_dur(self, _=None):
    """
    Recompute the expected acquisition duration; called when a VA that
    affects the expected duration is changed.
    """
    # Nothing to estimate until the survey stream exists
    if self._survey_stream is None:
        return

    streams = [self._survey_stream, self._secom_sem_cl_stream]
    total = acq.estimateTime(streams)
    logging.debug("Estimating %g s acquisition for %d streams", total, len(streams))

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(total), force_write=True)
def update_acquisition_time(self):
    """Refresh the estimated-time label (or prompt for a region of acquisition)."""
    if self._roa.value == UNDEFINED_ROI:
        # TODO: update the default text to be the same
        msg = "Region of acquisition needs to be selected"
    else:
        streams = self._tab_data_model.acquisitionView.getStreams()
        # Round up: a slightly pessimistic estimate is preferable
        secs = math.ceil(acq.estimateTime(streams))
        msg = "Estimated time is {}.".format(units.readable_time(secs))
    self.lbl_acqestimate.SetLabel(msg)
def update_acquisition_time(self):
    """Update the label with the estimated acquisition time."""
    streams = self._view.getStreams()
    if streams:
        # Fine alignment adds an extra (overlay) stream to the acquisition
        if self.chkbox_fine_align.Value:
            streams.append(self._ovrl_stream)
        # Round up, so the estimate errs on the pessimistic side
        secs = math.ceil(acq.estimateTime(streams))
        msg = "The estimated acquisition time is {}.".format(units.readable_time(secs))
    else:
        msg = "No streams present."
    self.lbl_acqestimate.SetLabel(msg)
def update_acquisition_time(self):
    """Recompute the acquisition-time estimate and show it in the label."""
    streams = self._view.getStreams()
    if not streams:
        self.lbl_acqestimate.SetLabel("No streams present.")
        return

    if self.chkbox_fine_align.Value:
        # Include the overlay stream when fine alignment is requested
        streams.append(self._ovrl_stream)

    duration = math.ceil(acq.estimateTime(streams))  # round a bit pessimistically
    self.lbl_acqestimate.SetLabel(
        "The estimated acquisition time is {}.".format(units.readable_time(duration)))
def update_acquisition_time(self):
    """
    Update the estimated acquisition time label.
    Must be called in the main GUI thread.
    """
    streams = self.get_acq_streams()
    if streams:
        est = math.ceil(acq.estimateTime(streams))  # ceil => slightly pessimistic
        label = "The estimated acquisition time is {}.".format(units.readable_time(est))
    else:
        label = "No streams present."
    self.lbl_acqestimate.SetLabel(label)
def _update_exp_dur(self, _=None):
    """
    Recompute expectedDuration; called when a VA that affects the expected
    duration is changed.
    """
    steps = self.numberofAcquisitions.value
    z_speed = self.focus.speed.value['z']
    # Time to move the focus actuator by one z-step
    move_dur = driver.estimateMoveDuration(abs(self.zstep.value), z_speed, 0.01)

    streams = self._get_acq_streams()
    one_acq = acq.estimateTime(streams)
    logging.debug("Estimating %g s acquisition for %d streams", one_acq, len(streams))

    # One acquisition per step, with a focus move between consecutive steps
    total = one_acq * steps + move_dur * (steps - 1)

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(total), force_write=True)
def _acquire(self, dlg, future):
    """
    Acquire all the streams once, reporting progress via the given future.
    return (list of DataArray): the acquired data
    raise CancelledError: if the user cancelled the acquisition
    """
    # Stop the streams: live view must not run during acquisition
    dlg.streambar_controller.pauseStreams()

    # Acquire (even if it was live, to be sure it's the data is up-to-date)
    streams = self._get_acq_streams()
    expected = acq.estimateTime(streams)
    begin = time.time()

    future._cur_f = InstantaneousFuture()
    future.task_canceller = self._acq_canceller
    future.set_running_or_notify_cancel()  # Indicate the work is starting now
    future.set_progress(end=begin + expected)
    dlg.showProgress(future)

    future._cur_f = acq.acquire(streams)
    das, err = future._cur_f.result()
    if future.cancelled():
        raise CancelledError()
    if err:
        raise err
    return das
def _acquire(self, dlg, future):
    """
    Run one acquisition of every stream, with progress shown in the dialog.
    return (list of DataArray): the acquired data
    raise CancelledError: on user cancellation
    """
    dlg.streambar_controller.pauseStreams()  # stop the live view first

    # Acquire (even if it was live, to be sure it's the data is up-to-date)
    ss = self._get_acq_streams()
    est_dur = acq.estimateTime(ss)
    t_start = time.time()

    future._cur_f = InstantaneousFuture()
    future.task_canceller = self._acq_canceller
    future.set_running_or_notify_cancel()  # the work starts now
    future.set_progress(end=t_start + est_dur)
    dlg.showProgress(future)

    future._cur_f = acq.acquire(ss, self.main_app.main_data.settings_obs)
    data, exp = future._cur_f.result()
    if future.cancelled():
        raise CancelledError()
    if exp:
        raise exp
    return data
def test_leech(self): """ try acquisition with leech """ # Create the streams and streamTree semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam) sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam) ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam) semars = stream.SEMARMDStream("test SEM/AR", [sems, ars]) st = stream.StreamTree(streams=[semsur, semars]) pcd = Fake0DDetector("test") pca = ProbeCurrentAcquirer(pcd) sems.leeches.append(pca) semsur.leeches.append(pca) # SEM survey settings are via the current hardware settings self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0] # SEM/AR settings are via the AR stream ars.roi.value = (0.1, 0.1, 0.8, 0.8) mx_brng = self.ccd.binning.range[1] binning = tuple(min(4, mx) for mx in mx_brng) # try binning 4x4 self.ccd.binning.value = binning self.ccd.exposureTime.value = 1 # s ars.repetition.value = (2, 3) num_ar = numpy.prod(ars.repetition.value) pca.period.value = 10 # Only at beginning and end est_time = acq.estimateTime(st.getProjections()) # prepare callbacks self.start = None self.end = None self.updates = 0 self.done = 0 # Run acquisition start = time.time() f = acq.acquire(st.getProjections()) f.add_update_callback(self.on_progress_update) f.add_done_callback(self.on_done) data, e = f.result() dur = time.time() - start self.assertGreaterEqual(dur, est_time / 2) # Estimated time shouldn't be too small self.assertIsInstance(data[0], model.DataArray) self.assertIsNone(e) self.assertEqual(len(data), num_ar + 2) thumb = acq.computeThumbnail(st, f) self.assertIsInstance(thumb, model.DataArray) self.assertGreaterEqual(self.updates, 1) # at least one update at end self.assertLessEqual(self.end, time.time()) self.assertTrue(not f.cancelled()) time.sleep(0.1) self.assertEqual(self.done, 1) for da in data: pcmd = da.metadata[model.MD_EBEAM_CURRENT_TIME] self.assertEqual(len(pcmd), 2)
def acquire_spec(wls, wle, res, dt, filename):
    """
    Acquire a monochromator spectrum together with an SEM survey image,
    then save both to a file.
    wls (float): start wavelength in m
    wle (float): end wavelength in m
    res (int): number of points to acquire
    dt (float): dwell time in seconds
    filename (str): filename to save to
    """
    # TODO: take a progressive future to update and know if it's the end
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    mchr = model.getComponent(role="monochromator")
    try:
        sgrh = model.getComponent(role="spectrograph")
    except LookupError:
        sgrh = model.getComponent(role="spectrograph-dedicated")
    opm = acq.path.OpticalPathManager(model.getMicroscope())

    # Remember the hardware settings, to restore them at the end
    prev_dt = ebeam.dwellTime.value
    prev_res = ebeam.resolution.value
    prev_scale = ebeam.scale.value
    prev_trans = ebeam.translation.value
    prev_wl = sgrh.position.value["wavelength"]

    # Create a stream for monochromator scan
    mchr_s = MonochromatorScanStream("Spectrum", mchr, ebeam, sgrh, opm=opm)
    mchr_s.startWavelength.value = wls
    mchr_s.endWavelength.value = wle
    mchr_s.numberOfPixels.value = res
    mchr_s.dwellTime.value = dt
    mchr_s.emtTranslation.value = ebeam.translation.value

    # Create SEM survey stream
    survey_s = stream.SEMStream(
        "Secondary electrons survey",
        sed, sed.data, ebeam,
        emtvas={"translation", "scale", "resolution", "dwellTime"},
    )
    # max FoV, with scale 4
    survey_s.emtTranslation.value = (0, 0)
    survey_s.emtScale.value = (4, 4)
    # FIX: was a bare generator expression, which is not a valid value for a
    # resolution VA — build an actual tuple of ints (// keeps integers)
    survey_s.emtResolution.value = tuple(v // 4 for v in ebeam.resolution.range[1])
    survey_s.emtDwellTime.value = 10e-6  # 10µs is hopefully enough

    # Acquire using the acquisition manager
    # Note: the monochromator scan stream is unknown to the acquisition manager,
    # so it'll be done last
    expt = acq.estimateTime([survey_s, mchr_s])
    f = acq.acquire([survey_s, mchr_s])
    try:
        # Note: the timeout is important, as it allows to catch KeyboardInterrupt
        das, e = f.result(2 * expt + 1)
    except KeyboardInterrupt:
        logging.info("Stopping before end of acquisition")
        f.cancel()
        return
    finally:
        logging.debug("Restoring hardware settings")
        # Restore resolution first (it may be clipped by the current scale)...
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt
        sgrh.moveAbs({"wavelength": prev_wl})
        ebeam.scale.value = prev_scale
        ebeam.translation.value = prev_trans
        # ...and again after restoring the scale, which resets the resolution
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt

    if e:
        logging.error("Acquisition failed: %s", e)
    if das:
        # Save the file
        exporter = dataio.find_fittest_converter(filename)
        exporter.export(filename, das)
        logging.info("Spectrum successfully saved to %s", filename)
    input("Press Enter to close.")
def acquire_spec(wls, wle, res, dt, filename):
    """
    Acquire a monochromator spectrum together with an SEM survey image,
    then save both to a file.
    wls (float): start wavelength in m
    wle (float): end wavelength in m
    res (int): number of points to acquire
    dt (float): dwell time in seconds
    filename (str): filename to save to
    """
    # TODO: take a progressive future to update and know if it's the end
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    mchr = model.getComponent(role="monochromator")
    sgrh = model.getComponent(role="spectrograph")

    # Remember the hardware settings, to restore them at the end
    prev_dt = ebeam.dwellTime.value
    prev_res = ebeam.resolution.value
    prev_scale = ebeam.scale.value
    prev_trans = ebeam.translation.value
    prev_wl = sgrh.position.value["wavelength"]

    # Create a stream for monochromator scan
    mchr_s = MonochromatorScanStream("Spectrum", mchr, ebeam, sgrh)
    mchr_s.startWavelength.value = wls
    mchr_s.endWavelength.value = wle
    mchr_s.numberOfPixels.value = res
    mchr_s.dwellTime.value = dt
    mchr_s.emtTranslation.value = ebeam.translation.value

    # Create SEM survey stream
    survey_s = stream.SEMStream(
        "Secondary electrons survey",
        sed, sed.data, ebeam,
        emtvas={"translation", "scale", "resolution", "dwellTime"},
    )
    # max FoV, with scale 4
    survey_s.emtTranslation.value = (0, 0)
    survey_s.emtScale.value = (4, 4)
    # FIX: was a bare generator expression, which is not a valid value for a
    # resolution VA — build an actual tuple of ints (// keeps integers)
    survey_s.emtResolution.value = tuple(v // 4 for v in ebeam.resolution.range[1])
    survey_s.emtDwellTime.value = 10e-6  # 10µs is hopefully enough

    # Acquire using the acquisition manager
    # Note: the monochromator scan stream is unknown to the acquisition manager,
    # so it'll be done last
    expt = acq.estimateTime([survey_s, mchr_s])
    f = acq.acquire([survey_s, mchr_s])
    try:
        # Note: the timeout is important, as it allows to catch KeyboardInterrupt
        das, e = f.result(2 * expt + 1)
    except KeyboardInterrupt:
        logging.info("Stopping before end of acquisition")
        f.cancel()
        return
    finally:
        logging.debug("Restoring hardware settings")
        # Restore resolution first (it may be clipped by the current scale)...
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt
        sgrh.moveAbs({"wavelength": prev_wl})
        ebeam.scale.value = prev_scale
        ebeam.translation.value = prev_trans
        # ...and again after restoring the scale, which resets the resolution
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt

    if e:
        logging.error("Acquisition failed: %s", e)
    if das:
        # Save the file
        exporter = dataio.find_fittest_converter(filename)
        exporter.export(filename, das)
        logging.info("Spectrum successfully saved to %s", filename)
    # FIX: was `raw_input` (Python 2 only); the sibling version of this script
    # already uses the Python 3 `input`
    input("Press Enter to close.")
def acquire(self, dlg):
    """
    Acquisition operation: run `numberofAcquisitions` acquisitions of the
    streams, post-process them into a cube, and export the result.
    dlg: the acquisition dialog (progress display, settings pausing)
    """
    main_data = self.main_app.main_data
    str_ctrl = main_data.tab.value.streambar_controller
    stream_paused = str_ctrl.pauseStreams()
    dlg.pauseSettings()

    nb = self.numberofAcquisitions.value
    ss = self._get_acq_streams()
    sacqt = acq.estimateTime(ss)  # s, estimated duration of one acquisition

    completed = False
    try:
        step_time = self.initAcquisition()
        logging.debug("Acquisition streams: %s", ss)

        # TODO: if drift correction, use it over all the time
        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        # list of list of DataArray: for each stream, for each acquisition, the data acquired
        images = None
        for i in range(nb):
            left = nb - i
            dur = sacqt * left + step_time * (left - 1)
            logging.debug("Acquisition %d of %d", i, nb)
            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acq.acquire(ss, self.main_app.main_data.settings_obs).result()
            if images is None:
                # Copy metadata from the first acquisition
                images = [[] for i in range(len(das))]
            for im, da in zip(images, das):
                im.append(da)
            if f.cancelled():
                raise CancelledError()
            # Execute an action to prepare the next acquisition for the ith acquisition
            self.stepAcquisition(i, images)

        f.set_result(None)  # Indicate it's over

        # Construct a cube from each stream's image.
        images = self.postProcessing(images)

        # Export image
        exporter = dataio.find_fittest_converter(self.filename.value)
        exporter.export(self.filename.value, images)
        completed = True
        dlg.Close()
    except CancelledError:
        logging.debug("Acquisition cancelled.")
        dlg.resumeSettings()
    except Exception as ex:
        # FIX: was `except e:`, which (mis)used the acquisition-error variable
        # as an exception class and failed at runtime instead of catching
        logging.exception(ex)
    finally:
        # Do completion actions
        self.completeAcquisition(completed)
def test_sync_path_guess(self):
    """
    try synchronized acquisition using the Optical Path Manager
    """
    # Create the streams and streamTree
    opmngr = path.OpticalPathManager(self.microscope)
    semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
    sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
    ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam, opm=opmngr)
    semars = stream.SEMARMDStream("test SEM/AR", sems, ars)
    specs = stream.SpectrumSettingsStream("test spec", self.spec, self.spec.data, self.ebeam, opm=opmngr)
    sps = stream.SEMSpectrumMDStream("test sem-spec", sems, specs)
    st = stream.StreamTree(streams=[semsur, semars, sps])

    # SEM survey settings are via the current hardware settings
    self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

    # SEM/AR/SPEC settings are via the AR stream
    ars.roi.value = (0.1, 0.1, 0.8, 0.8)
    specs.roi.value = (0.2, 0.2, 0.7, 0.7)
    mx_brng = self.ccd.binning.range[1]
    binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
    self.ccd.binning.value = binning
    self.ccd.exposureTime.value = 1  # s
    ars.repetition.value = (2, 3)
    specs.repetition.value = (3, 2)
    num_ar = numpy.prod(ars.repetition.value)

    est_time = acq.estimateTime(st.getStreams())

    # prepare callbacks
    self.start = None
    self.end = None
    self.updates = 0
    self.done = 0

    # Run acquisition
    start = time.time()
    f = acq.acquire(st.getStreams())
    f.add_update_callback(self.on_progress_update)
    f.add_done_callback(self.on_done)
    data, e = f.result()
    dur = time.time() - start
    self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
    self.assertIsInstance(data[0], model.DataArray)
    self.assertIsNone(e)
    # AR images + survey + SEM CL + spectrum images
    self.assertEqual(len(data), num_ar + 4)
    thumb = acq.computeThumbnail(st, f)
    self.assertIsInstance(thumb, model.DataArray)
    self.assertGreaterEqual(self.updates, 1)  # at least one update at end
    self.assertLessEqual(self.end, time.time())
    self.assertEqual(self.done, 1)
    self.assertTrue(not f.cancelled())

    # assert optical path configuration
    exp_pos = path.SPARC_MODES["spectral"][1]
    self.assertEqual(self.lenswitch.position.value, exp_pos["lens-switch"])
    self.assertEqual(self.spec_det_sel.position.value, exp_pos["spec-det-selector"])
    self.assertEqual(self.ar_spec_sel.position.value, exp_pos["ar-spec-selector"])
def test_sync_path_guess(self):
    """
    try synchronized acquisition using the Optical Path Manager
    """
    # Create the streams and streamTree
    opmngr = path.OpticalPathManager(self.microscope)
    semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
    sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
    ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam, opm=opmngr)
    semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
    specs = stream.SpectrumSettingsStream("test spec", self.spec, self.spec.data, self.ebeam, opm=opmngr)
    sps = stream.SEMSpectrumMDStream("test sem-spec", [sems, specs])
    st = stream.StreamTree(streams=[semsur, semars, sps])

    # SEM survey settings are via the current hardware settings
    self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

    # SEM/AR/SPEC settings are via the AR stream
    ars.roi.value = (0.1, 0.1, 0.8, 0.8)
    specs.roi.value = (0.2, 0.2, 0.7, 0.7)
    mx_brng = self.ccd.binning.range[1]
    binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
    self.ccd.binning.value = binning
    self.ccd.exposureTime.value = 1  # s
    ars.repetition.value = (2, 3)
    specs.repetition.value = (3, 2)
    num_ar = numpy.prod(ars.repetition.value)

    est_time = acq.estimateTime(st.getProjections())

    # prepare callbacks
    self.start = None
    self.end = None
    self.updates = 0
    self.done = 0

    # Run acquisition
    start = time.time()
    f = acq.acquire(st.getProjections())
    f.add_update_callback(self.on_progress_update)
    f.add_done_callback(self.on_done)
    data, e = f.result()
    dur = time.time() - start
    self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
    self.assertIsInstance(data[0], model.DataArray)
    self.assertIsNone(e)
    # AR images + survey + SEM CL + spectrum images
    self.assertEqual(len(data), num_ar + 4)
    thumb = acq.computeThumbnail(st, f)
    self.assertIsInstance(thumb, model.DataArray)
    self.assertGreaterEqual(self.updates, 1)  # at least one update at end
    self.assertLessEqual(self.end, time.time())
    self.assertTrue(not f.cancelled())

    # assert optical path configuration
    exp_pos = path.SPARC_MODES["spectral"][1]
    self.assertEqual(self.lenswitch.position.value, exp_pos["lens-switch"])
    self.assertEqual(self.spec_det_sel.position.value, exp_pos["spec-det-selector"])
    self.assertEqual(self.ar_spec_sel.position.value, exp_pos["ar-spec-selector"])

    # The done callback may arrive slightly after the result
    time.sleep(0.1)
    self.assertEqual(self.done, 1)
def acquire(self, dlg):
    """
    Acquisition operation: run `numberofAcquisitions` acquisitions of the
    streams, post-process them into a cube, and export the result.
    dlg: the acquisition dialog (progress display, settings pausing)
    """
    main_data = self.main_app.main_data
    str_ctrl = main_data.tab.value.streambar_controller
    stream_paused = str_ctrl.pauseStreams()
    dlg.pauseSettings()

    nb = self.numberofAcquisitions.value
    ss = self._get_acq_streams()
    sacqt = acq.estimateTime(ss)  # s, estimated duration of one acquisition

    completed = False
    try:
        step_time = self.initAcquisition()
        logging.debug("Acquisition streams: %s", ss)

        # TODO: if drift correction, use it over all the time
        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        # list of list of DataArray: for each stream, for each acquisition, the data acquired
        images = None
        for i in range(nb):
            left = nb - i
            dur = sacqt * left + step_time * (left - 1)
            logging.debug("Acquisition %d of %d", i, nb)
            # FIX: removed the `if left == 1 and last_ss:` block — `last_ss`
            # was never defined in this method, so it raised NameError on the
            # last iteration
            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acq.acquire(ss).result()
            if images is None:
                # Copy metadata from the first acquisition
                images = [[] for i in range(len(das))]
            for im, da in zip(images, das):
                im.append(da)
            if f.cancelled():
                raise CancelledError()
            # Execute an action to prepare the next acquisition for the ith acquisition
            self.stepAcquisition(i, images)

        f.set_result(None)  # Indicate it's over

        # Construct a cube from each stream's image.
        images = self.postProcessing(images)

        # Export image
        exporter = dataio.find_fittest_converter(self.filename.value)
        exporter.export(self.filename.value, images)
        completed = True
        dlg.Close()
    except CancelledError:
        logging.debug("Acquisition cancelled.")
        dlg.resumeSettings()
    except Exception as ex:
        # FIX: was `except e:`, which (mis)used the acquisition-error variable
        # as an exception class and failed at runtime instead of catching
        logging.exception(ex)
    finally:
        # Do completion actions
        self.completeAcquisition(completed)