def _fast_acquire_one(self, dlg, st, last_ss):
    """
    Acquires one stream, *as fast as possible* (ie, the period is not used).
    Only works with LiveStreams (and not with MDStreams)
    st (LiveStream)
    last_ss (list of Streams): all the streams to be acquired at the last acquisition
    """
    # Essentially, we trick the stream a little bit, by convincing it that
    # we want a live view, but instead of displaying the data, we store it.
    # It's much faster because we don't have to stop/start the detector between
    # each acquisition.
    nb = self.numberOfAcquisitions.value

    fn = self.filename.value
    self._exporter = dataio.find_fittest_converter(fn)
    bs, ext = splitext(fn)
    fn_pat = bs + "-%.5d" + ext

    self._acq_completed = threading.Event()

    f = model.ProgressiveFuture()
    f.task_canceller = self._cancel_fast_acquire
    f._stream = st
    if last_ss:
        nb -= 1
        extra_dur = acqmng.estimateTime([st] + last_ss)
    else:
        extra_dur = 0
    self._hijack_live_stream(st, f, nb, fn_pat, extra_dur)

    try:
        # Start acquisition and wait until it's done
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)
        st.is_active.value = True
        self._acq_completed.wait()

        if f.cancelled():
            dlg.resumeSettings()
            return
    finally:
        st.is_active.value = False  # just to be extra sure it's stopped
        logging.debug("Restoring stream %s", st)
        self._restore_live_stream(st)

    # last "normal" acquisition, if needed
    if last_ss:
        logging.debug("Acquiring last acquisition, with all the streams")
        ss = [st] + last_ss
        f.set_progress(end=time.time() + acqmng.estimateTime(ss))
        das, e = acqmng.acquire(ss, self.main_app.main_data.settings_obs).result()
        self._save_data(fn_pat % (nb,), das)

    self._stop_saving_threads()  # Wait for all the data to be stored
    f.set_result(None)  # Indicate it's over
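# _hijack_live_stream() and _restore_live_stream() are not shown above. Below is a
# minimal standalone sketch of the pattern they imply; all names here (FrameGrabber,
# on_new_data, save_cb) are hypothetical, not the actual plugin helpers. The idea is
# to keep the detector running and divert each new frame to a save callback instead
# of the live display, signalling completion once enough frames have arrived.

import threading

class FrameGrabber(object):
    """Counts frames from a continuously-running source and stores each one."""

    def __init__(self, nb, save_cb):
        self._nb = nb  # number of frames to grab
        self._save_cb = save_cb  # called with (index, frame) for each frame
        self._count = 0
        self.completed = threading.Event()  # same role as self._acq_completed above

    def on_new_data(self, frame):
        # Replaces the live-view update: store the data instead of displaying it
        if self._count < self._nb:
            self._save_cb(self._count, frame)
            self._count += 1
        if self._count >= self._nb:
            self.completed.set()

# Usage sketch: subscribe grabber.on_new_data to the stream's new-data event,
# set is_active to True, wait on grabber.completed, then restore the stream.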
def _acquire_multi(self, dlg, ss, last_ss):
    p = self.period.value
    nb = self.numberOfAcquisitions.value

    fn = self.filename.value
    self._exporter = dataio.find_fittest_converter(fn)
    bs, ext = splitext(fn)
    fn_pat = bs + "-%.5d" + ext

    sacqt = acqmng.estimateTime(ss)
    intp = max(0, p - sacqt)
    if p < sacqt:
        logging.warning(
            "Acquisition will take %g s, but the requested period between acquisitions is only %g s",
            sacqt, p)

    # TODO: if drift correction, use it over all the time
    f = model.ProgressiveFuture()
    f.task_canceller = lambda l: True  # To allow cancelling while it's running
    f.set_running_or_notify_cancel()  # Indicate the work is starting now
    dlg.showProgress(f)

    for i in range(nb):
        left = nb - i
        dur = sacqt * left + intp * (left - 1)
        if left == 1 and last_ss:
            ss += last_ss
            dur += acqmng.estimateTime(ss) - sacqt

        startt = time.time()
        f.set_progress(end=startt + dur)
        das, e = acqmng.acquire(ss, self.main_app.main_data.settings_obs).result()
        if f.cancelled():
            dlg.resumeSettings()
            return

        self._save_data(fn_pat % (i,), das)

        # Wait for the requested period, except after the last acquisition
        if left > 1:
            sleept = (startt + p) - time.time()
            if sleept > 0:
                time.sleep(sleept)
            else:
                logging.info("Immediately starting next acquisition, %g s late", -sleept)

    self._stop_saving_threads()  # Wait for all the data to be stored
    f.set_result(None)  # Indicate it's over
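# Worked example of the scheduling arithmetic above, with assumed numbers: an
# estimated stream time sacqt = 2 s, a requested period p = 5 s and nb = 4
# acquisitions give an inter-acquisition pause intp = max(0, 5 - 2) = 3 s, and
# before the first acquisition the remaining duration is 2 * 4 + 3 * 3 = 17 s.
sacqt, p, nb = 2.0, 5.0, 4
intp = max(0, p - sacqt)
assert intp == 3.0
assert sacqt * nb + intp * (nb - 1) == 17.0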
def acquire(self, dlg):
    main_data = self.main_app.main_data
    str_ctrl = main_data.tab.value.streambar_controller
    stream_paused = str_ctrl.pauseStreams()
    dlg.pauseSettings()
    self._start_saving_threads(4)

    ss, last_ss = self._get_acq_streams()
    sacqt = acqmng.estimateTime(ss)
    p = self.period.value
    nb = self.numberOfAcquisitions.value

    try:
        # If the user just wants to acquire as fast as possible, and there is
        # a single stream, we can use an optimised version
        if (len(ss) == 1 and isinstance(ss[0], LiveStream)
                and nb >= 2
                and sacqt < 5 and p < sacqt + Stream.SETUP_OVERHEAD):
            logging.info("Fast timelapse detected, will acquire as fast as possible")
            self._fast_acquire_one(dlg, ss[0], last_ss)
        else:
            self._acquire_multi(dlg, ss, last_ss)
    finally:
        # Make sure the threads are stopped even in case of error
        self._stop_saving_threads()

    # self.showAcquisition(self.filename.value)

    logging.debug("Closing dialog")
    dlg.Close()
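# The saving helpers used above (_start_saving_threads, _save_data,
# _stop_saving_threads) are not shown. Here is a minimal sketch of the kind of
# queue-based background saver those calls imply; the BackgroundSaver class and
# its method names are hypothetical, not the actual plugin code.

import queue
import threading

class BackgroundSaver(object):
    def __init__(self, exporter, workers=4):
        self._exporter = exporter  # e.g. from dataio.find_fittest_converter()
        self._queue = queue.Queue()
        self._threads = [threading.Thread(target=self._run, daemon=True)
                         for _ in range(workers)]
        for t in self._threads:
            t.start()

    def save(self, fn, das):
        # Returns immediately; the export happens in a worker thread,
        # so the next acquisition doesn't wait for the disk.
        self._queue.put((fn, das))

    def _run(self):
        while True:
            item = self._queue.get()
            if item is None:  # sentinel: stop this worker
                return
            fn, das = item
            self._exporter.export(fn, das)

    def stop(self):
        # Wait for all the data to be stored, like _stop_saving_threads()
        for _ in self._threads:
            self._queue.put(None)
        for t in self._threads:
            t.join()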
def estimateTime(self, remaining=None):
    """
    Estimates duration for acquisition and stitching.
    :param remaining: (int > 0) The number of remaining tiles
    :returns: (float) estimated required time
    """
    if remaining is None:
        remaining = self._nx * self._ny

    acq_time = 0
    for stream in self._streams:
        acq_stream_time = acqmng.estimateTime([stream])
        if stream.focuser is not None and len(self._zlevels) > 1:
            # Acquisition time for each stream will be multiplied by the number of zstack levels
            acq_stream_time *= len(self._zlevels)
        acq_time += acq_stream_time

    # Estimate stitching time based on number of pixels in the overlapping part
    max_pxs = 0
    for s in self._streams:
        for sda in s.raw:
            pxs = sda.shape[0] * sda.shape[1]
            if pxs > max_pxs:
                max_pxs = pxs

    stitch_time = (self._nx * self._ny * max_pxs * self._overlap) / self.STITCH_SPEED
    try:
        move_time = max(self._guessSmallestFov(self._streams)) * (remaining - 1) / self._move_speed
        # current tile is part of remaining, so no need to move there
    except ValueError:  # no current streams
        move_time = 0.5

    return acq_time * remaining + move_time + stitch_time
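# Worked example of the estimate above, with assumed numbers: a 3 x 3 grid
# (remaining = 9), one stream with a focuser and 5 z-levels at 2 s per level,
# largest tile 1024 x 1024 px, overlap 0.2, STITCH_SPEED = 1e8 px/s, smallest
# FoV 100 µm and move speed 1 mm/s.
acq_time = 2.0 * 5                                # 10 s per tile (z-stack multiplies)
stitch_time = (3 * 3 * 1024 * 1024 * 0.2) / 1e8   # ~0.019 s
move_time = 100e-6 * (9 - 1) / 1e-3               # 0.8 s for the 8 stage moves
total = acq_time * 9 + move_time + stitch_time    # ~90.8 s, dominated by acquisition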
def _acquire(self, dlg, future):
    # Stop the streams
    dlg.streambar_controller.pauseStreams()

    # Acquire (even if it was live, to be sure the data is up-to-date)
    ss = self._get_acq_streams()
    dur = acqmng.estimateTime(ss)
    startt = time.time()
    future._cur_f = InstantaneousFuture()
    future.task_canceller = self._acq_canceller
    future.set_running_or_notify_cancel()  # Indicate the work is starting now
    future.set_progress(end=startt + dur)
    dlg.showProgress(future)

    future._cur_f = acqmng.acquire(ss, self.main_app.main_data.settings_obs)
    das, e = future._cur_f.result()
    if future.cancelled():
        raise CancelledError()
    if e:
        raise e

    return das
def estimateTime(self, remaining=None):
    """
    Estimates duration for acquisition and stitching.
    :param remaining: (int > 0) The number of remaining tiles
    :returns: (float) estimated required time
    """
    if remaining is None:
        remaining = self._nx * self._ny

    acq_time = acqmng.estimateTime(self._streams)

    # Estimate stitching time based on number of pixels in the overlapping part
    max_pxs = 0
    for s in self._streams:
        for sda in s.raw:
            pxs = sda.shape[0] * sda.shape[1]
            if pxs > max_pxs:
                max_pxs = pxs

    stitch_time = (self._nx * self._ny * max_pxs * self._overlap) / self.STITCH_SPEED
    try:
        move_time = max(self._guessSmallestFov(self._streams)) * (remaining - 1) / self.MOVE_SPEED
        # current tile is part of remaining, so no need to move there
    except ValueError:  # no current streams
        move_time = 0.5

    return acq_time * remaining + move_time + stitch_time
def update_acquisition_time(self):
    if self._ellipsis_animator:
        # cancel if there is an ellipsis animator updating the status message
        self._ellipsis_animator.cancel()
        self._ellipsis_animator = None

    # Don't update estimated time if acquisition is running (as we are
    # sharing the label with the estimated time-to-completion).
    if self._main_data_model.is_acquiring.value:
        return

    lvl = None  # icon status shown
    if self._main_data_model.is_preparing.value:
        txt = u"Optical path is being reconfigured…"
        self._ellipsis_animator = EllipsisAnimator(txt, self.lbl_acqestimate)
        self._ellipsis_animator.start()
        lvl = logging.INFO
    elif self._roa.value == UNDEFINED_ROI:
        # TODO: update the default text to be the same
        txt = u"Region of acquisition needs to be selected"
        lvl = logging.WARN
    else:
        streams = self._tab_data_model.acquisitionStreams
        acq_time = acqmng.estimateTime(streams)
        acq_time = math.ceil(acq_time)  # round up, to be a bit pessimistic
        txt = u"Estimated time is {}."
        txt = txt.format(units.readable_time(acq_time))

    logging.debug("Updating status message %s, with level %s", txt, lvl)
    self.lbl_acqestimate.SetLabel(txt)
    self._show_status_icons(lvl)
def estimate_time(self, remaining=None):
    """
    Estimates duration for acquisition and stitching.
    """
    ss, stitch_ss = self._get_acq_streams()
    if remaining is None:
        remaining = self.nx.value * self.ny.value
    acqt = acqmng.estimateTime(ss)

    if self.stitch.value:
        # Estimate stitching time based on number of pixels in the overlapping part
        max_pxs = 0
        for s in stitch_ss:
            for sda in s.raw:
                pxs = sda.shape[0] * sda.shape[1]
                if pxs > max_pxs:
                    max_pxs = pxs

        # STITCH_SPEED is in px/s, so divide the overlapping pixel count by it
        stitcht = self.nx.value * self.ny.value * max_pxs * self.overlap.value / self.STITCH_SPEED
    else:
        stitcht = 0

    try:
        # MOVE_SPEED is in m/s, so the move duration is distance / speed
        movet = max(self._guess_smallest_fov()) / self.MOVE_SPEED * (remaining - 1)
        # current tile is part of remaining, so no need to move there
    except ValueError:  # no current streams
        movet = 0.5

    return acqt * remaining + movet + stitcht
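# Quick sanity check of the units after the division fix above (assumed numbers):
# fov = 100 µm, MOVE_SPEED = 1e-3 m/s, remaining = 9 tiles -> 8 moves of 0.1 s each.
assert abs(100e-6 / 1e-3 * (9 - 1) - 0.8) < 1e-9  # 0.8 s of stage moves in total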
def test_sync_sem_ccd(self):
    """
    try acquisition with fairly complex SEM/CCD stream
    """
    # Create the streams and streamTree
    semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
    sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
    ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam)
    semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
    st = stream.StreamTree(streams=[semsur, semars])

    # SEM survey settings are via the current hardware settings
    self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

    # SEM/AR settings are via the AR stream
    ars.roi.value = (0.1, 0.1, 0.8, 0.8)
    mx_brng = self.ccd.binning.range[1]
    binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
    self.ccd.binning.value = binning
    self.ccd.exposureTime.value = 1  # s
    ars.repetition.value = (2, 3)
    num_ar = numpy.prod(ars.repetition.value)

    est_time = acqmng.estimateTime(st.getProjections())

    # prepare callbacks
    self.start = None
    self.end = None
    self.updates = 0
    self.done = 0

    # Run acquisition
    start = time.time()
    f = acqmng.acquire(st.getProjections())
    f.add_update_callback(self.on_progress_update)
    f.add_done_callback(self.on_done)

    data, e = f.result()
    dur = time.time() - start
    self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
    self.assertIsInstance(data[0], model.DataArray)
    self.assertIsNone(e)
    self.assertEqual(len(data), num_ar + 2)

    thumb = acqmng.computeThumbnail(st, f)
    self.assertIsInstance(thumb, model.DataArray)

    self.assertGreaterEqual(self.updates, 1)  # at least one update at end
    self.assertLessEqual(self.end, time.time())
    self.assertTrue(not f.cancelled())

    time.sleep(0.1)
    self.assertEqual(self.done, 1)
def _update_exp_dur(self, _=None):
    """
    Called when a VA that affects the expected duration is changed
    """
    nb = self.numberOfAcquisitions.value
    p = self.period.value
    ss, last_ss = self._get_acq_streams()

    sacqt = acqmng.estimateTime(ss)
    logging.debug("Estimating %g s acquisition for %d streams", sacqt, len(ss))

    intp = max(0, p - sacqt)
    dur = sacqt * nb + intp * (nb - 1)
    if last_ss:
        dur += acqmng.estimateTime(ss + last_ss) - sacqt

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(dur), force_write=True)
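# Worked example of the duration formula above, with assumed numbers: nb = 3,
# p = 10 s, sacqt = 4 s, and extra "last" streams that bring the final
# acquisition to 6 s. The pause is intp = 6 s, so the total is
# 4 * 3 + 6 * 2 + (6 - 4) = 26 s, which math.ceil() then rounds up.
nb, p, sacqt, sacqt_with_last = 3, 10.0, 4.0, 6.0
intp = max(0, p - sacqt)
dur = sacqt * nb + intp * (nb - 1)  # 12 + 12 = 24 s
dur += sacqt_with_last - sacqt      # last acquisition takes 2 s longer
assert dur == 26.0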
def update_acquisition_time(self):
    """
    Must be called in the main GUI thread.
    """
    streams = self.get_acq_streams()
    if streams:
        acq_time = acqmng.estimateTime(streams)
        acq_time = math.ceil(acq_time)  # round up, a bit pessimistically
        txt = "The estimated acquisition time is {}."
        txt = txt.format(units.readable_time(acq_time))
    else:
        txt = "No streams present."

    self.lbl_acqestimate.SetLabel(txt)
def _update_exp_dur(self, _=None):
    """
    Called when a VA that affects the expected duration is changed.
    """
    if self._survey_stream is None:
        return

    strs = [self._survey_stream, self._secom_sem_cl_stream]
    dur = acqmng.estimateTime(strs)
    logging.debug("Estimating %g s acquisition for %d streams", dur, len(strs))

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(dur), force_write=True)
def _update_exp_dur(self, _=None):
    """
    Called when a VA that affects the expected duration is changed
    """
    nsteps = self.numberofAcquisitions.value
    step_time = self._estimate_step_duration()
    ss = self._get_acq_streams()

    sacqt = acqmng.estimateTime(ss)
    logging.debug("Estimating %g s acquisition for %d streams", sacqt, len(ss))

    dur = sacqt * nsteps + step_time * (nsteps - 1)

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(dur), force_write=True)
def _update_exp_dur(self, _=None):
    """
    Called when a VA that affects the expected duration is changed
    """
    nsteps = self.numberOfAcquisitions.value
    step_time = self._estimate_step_duration()
    ss = self._get_acq_streams()

    sacqt = acqmng.estimateTime(ss)
    if self._streams_intertwined:
        # Moving the focus will have to be done for every stream
        dur = sacqt * nsteps + step_time * (nsteps - 1) * len(ss)
    else:
        dur = sacqt * nsteps + step_time * (nsteps - 1)
    logging.debug("Estimating acquisition of %d streams will take %g s", len(ss), dur)

    # Use _set_value as it's read only
    self.expectedDuration._set_value(math.ceil(dur), force_write=True)
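# Worked example contrasting the two formulas above, with assumed numbers:
# 3 streams, nsteps = 5, combined stream time sacqt = 6 s, step (focus move)
# time 1 s. Intertwined streams pay the step cost once per stream per step.
ss_len, nsteps, sacqt, step_time = 3, 5, 6.0, 1.0
dur_intertwined = sacqt * nsteps + step_time * (nsteps - 1) * ss_len  # 30 + 12 = 42 s
dur_sequential = sacqt * nsteps + step_time * (nsteps - 1)            # 30 + 4 = 34 s
assert (dur_intertwined, dur_sequential) == (42.0, 34.0)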
def acquire_spec(wls, wle, res, dt, filename):
    """
    wls (float): start wavelength in m
    wle (float): end wavelength in m
    res (int): number of points to acquire
    dt (float): dwell time in seconds
    filename (str): filename to save to
    """
    # TODO: take a progressive future to update and know if it's the end
    ebeam = model.getComponent(role="e-beam")
    sed = model.getComponent(role="se-detector")
    mchr = model.getComponent(role="monochromator")
    try:
        sgrh = model.getComponent(role="spectrograph")
    except LookupError:
        sgrh = model.getComponent(role="spectrograph-dedicated")
    opm = acq.path.OpticalPathManager(model.getMicroscope())

    prev_dt = ebeam.dwellTime.value
    prev_res = ebeam.resolution.value
    prev_scale = ebeam.scale.value
    prev_trans = ebeam.translation.value
    prev_wl = sgrh.position.value["wavelength"]

    # Create a stream for monochromator scan
    mchr_s = MonochromatorScanStream("Spectrum", mchr, ebeam, sgrh, opm=opm)
    mchr_s.startWavelength.value = wls
    mchr_s.endWavelength.value = wle
    mchr_s.numberOfPixels.value = res
    mchr_s.dwellTime.value = dt
    mchr_s.emtTranslation.value = ebeam.translation.value

    # Create SEM survey stream
    survey_s = stream.SEMStream(
        "Secondary electrons survey",
        sed, sed.data, ebeam,
        emtvas={"translation", "scale", "resolution", "dwellTime"},
    )
    # max FoV, with scale 4
    survey_s.emtTranslation.value = (0, 0)
    survey_s.emtScale.value = (4, 4)
    survey_s.emtResolution.value = tuple(v // 4 for v in ebeam.resolution.range[1])  # must be a tuple of ints
    survey_s.emtDwellTime.value = 10e-6  # 10µs is hopefully enough

    # Acquire using the acquisition manager
    # Note: the monochromator scan stream is unknown to the acquisition manager,
    # so it'll be done last
    expt = acqmng.estimateTime([survey_s, mchr_s])
    f = acqmng.acquire([survey_s, mchr_s])

    try:
        # Note: the timeout is important, as it allows to catch KeyboardInterrupt
        das, e = f.result(2 * expt + 1)
    except KeyboardInterrupt:
        logging.info("Stopping before end of acquisition")
        f.cancel()
        return
    finally:
        logging.debug("Restoring hardware settings")
        sgrh.moveAbs({"wavelength": prev_wl})
        ebeam.scale.value = prev_scale
        ebeam.translation.value = prev_trans
        # Restore the resolution after the scale, as changing the scale adjusts it
        if prev_res != (1, 1):
            ebeam.resolution.value = prev_res
        ebeam.dwellTime.value = prev_dt

    if e:
        logging.error("Acquisition failed: %s", e)

    if das:
        # Save the file
        exporter = dataio.find_fittest_converter(filename)
        exporter.export(filename, das)
        logging.info("Spectrum successfully saved to %s", filename)

    input("Press Enter to close.")
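# Usage sketch for the script above; the values are only illustrative, and the
# call requires a running microscope backend with the components listed in the
# function. Scan 100 points between 400 nm and 700 nm at 5 ms dwell time per
# point, saving to an HDF5 file:
#
#     acquire_spec(400e-9, 700e-9, 100, 5e-3, "monochromator-scan.h5")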
def test_acq_fine_align(self):
    """
    try acquisition with SEM + Optical + overlay streams
    """
    # Create the streams
    sems = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
    # SEM settings are via the current hardware settings
    self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

    fs1 = stream.FluoStream("test orange", self.ccd, self.ccd.data,
                            self.light, self.light_filter)
    fs1.excitation.value = sorted(fs1.excitation.choices)[0]
    fs1.emission.value = sorted(fs1.emission.choices)[-1]
    fs2 = stream.FluoStream("test blue", self.ccd, self.ccd.data,
                            self.light, self.light_filter)
    fs2.excitation.value = sorted(fs2.excitation.choices)[-1]
    fs2.emission.value = sorted(fs2.emission.choices)[-1]
    self.ccd.exposureTime.value = 0.1  # s

    ovrl = stream.OverlayStream("overlay", self.ccd, self.ebeam, self.sed)
    ovrl.dwellTime.value = 0.3
    ovrl.repetition.value = (4, 4)

    streams = [sems, fs1, fs2, ovrl]
    est_time = acqmng.estimateTime(streams)
    sum_est_time = sum(s.estimateAcquisitionTime() for s in streams)
    self.assertGreaterEqual(est_time, sum_est_time)

    # prepare callbacks
    self.start = None
    self.end = None
    self.updates = 0
    self.done = 0

    # Run acquisition
    start = time.time()
    f = acqmng.acquire(streams)
    f.add_update_callback(self.on_progress_update)
    f.add_done_callback(self.on_done)

    data, e = f.result()
    dur = time.time() - start
    self.assertGreater(dur, est_time / 2)  # Estimated time shouldn't be too small
    self.assertIsInstance(data[0], model.DataArray)
    self.assertIsNone(e)  # Check there was no exception
    self.assertEqual(len(data), len(streams) - 1)

    # No overlay correction metadata anywhere (it has all been merged)
    for d in data:
        for k in [model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR]:
            self.assertNotIn(k, d.metadata)

    # thumb = acqmng.computeThumbnail(st, f)
    # self.assertIsInstance(thumb, model.DataArray)

    self.assertGreaterEqual(self.updates, 1)  # at least one update at end
    self.assertLessEqual(self.end, time.time())
    self.assertTrue(not f.cancelled())

    # make sure the callback had time to be called
    time.sleep(0.1)
    self.assertEqual(self.done, 1)
def acquire(self, dlg):
    """
    Acquisition operation.
    """
    main_data = self.main_app.main_data
    str_ctrl = main_data.tab.value.streambar_controller
    stream_paused = str_ctrl.pauseStreams()
    dlg.pauseSettings()

    nb = self.numberOfAcquisitions.value
    ss = self._get_acq_streams()
    sacqt = acqmng.estimateTime(ss)
    logging.debug("Acquisition streams: %s", ss)

    # all_ss is a list of lists of streams to acquire. In theory, we could do
    # several sets of acquisitions, each with its own set of streams. However,
    # that's not how it's used. It's just a generic way to handle both cases:
    # either each acquisition has only one stream, or there is a single
    # acquisition to do all the streams.
    if self._streams_intertwined:
        # Streams are fastest changed: for each step, all streams are acquired
        all_ss = [ss]
    else:
        # Streams are slowest changed: for each stream, do all steps together
        all_ss = [[s] for s in ss]

    try:
        # list of list of list of DataArray: for each acquisition, for each stream,
        # for each step, the data acquired
        all_images = [[] for _ in all_ss]
        completed = False
        step_time = self.initAcquisition()

        # TODO: if drift correction, use it over all the time
        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        total_nb = left = len(all_ss) * nb
        logging.debug("Will repeat the acquisition %d times", len(all_ss))
        for ss, images in zip(all_ss, all_images):
            for i in range(nb):
                dur = sacqt * left + step_time * (left - 1)
                logging.debug("Acquisition %d of %d", total_nb - left, total_nb)

                startt = time.time()
                f.set_progress(end=startt + dur)

                # Prepare the axis for this step
                self.preStepAcquisition(i)
                das, e = acqmng.acquire(ss, self.main_app.main_data.settings_obs).result()
                if e:
                    logging.warning("Will continue, although acquisition %d partially failed: %s",
                                    total_nb - left, e)
                if len(das) != len(ss):
                    logging.warning("Expected %d DataArrays, but got %d", len(ss), len(das))

                if not images:
                    images[:] = [[] for _ in das]
                for im, da in zip(images, das):
                    im.append(da)

                if f.cancelled():
                    raise CancelledError()

                # Clean-up or adjust the images
                self.postStepAcquisition(i, images)
                left -= 1

        # Collate back all the data as "for each stream, all the images acquired"
        images = []
        for ii in all_images:
            images.extend(ii)

        # Construct a cube from each stream's image.
        images = self.postProcessing(images)

        # Export image
        exporter = dataio.find_fittest_converter(self.filename.value)
        exporter.export(self.filename.value, images)
        f.set_result(None)  # Indicate it's over
        completed = True
        dlg.Close()
    except CancelledError:
        logging.debug("Acquisition cancelled.")
        dlg.resumeSettings()
    except Exception as e:
        logging.exception(e)
    finally:
        # Do completion actions
        self.completeAcquisition(completed)
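# Small standalone illustration of the ordering the all_ss trick above produces,
# with strings standing in for streams: intertwined runs every stream at each
# step, otherwise each stream runs all its steps back-to-back.
nb = 2
ss = ["sem", "fluo"]
for intertwined in (True, False):
    all_ss = [ss] if intertwined else [[s] for s in ss]
    order = [(s, i) for group in all_ss for i in range(nb) for s in group]
    print(intertwined, order)
# True  -> [('sem', 0), ('fluo', 0), ('sem', 1), ('fluo', 1)]
# False -> [('sem', 0), ('sem', 1), ('fluo', 0), ('fluo', 1)]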
def acquire(self, dlg):
    """
    Acquisition operation.
    """
    main_data = self.main_app.main_data
    str_ctrl = main_data.tab.value.streambar_controller
    stream_paused = str_ctrl.pauseStreams()
    dlg.pauseSettings()

    nb = self.numberofAcquisitions.value
    ss = self._get_acq_streams()
    sacqt = acqmng.estimateTime(ss)

    completed = False

    try:
        step_time = self.initAcquisition()
        logging.debug("Acquisition streams: %s", ss)

        # TODO: if drift correction, use it over all the time
        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        # list of lists of DataArray: for each stream, for each acquisition, the data acquired
        images = None
        for i in range(nb):
            left = nb - i
            dur = sacqt * left + step_time * (left - 1)

            logging.debug("Acquisition %d of %d", i, nb)

            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acqmng.acquire(ss, self.main_app.main_data.settings_obs).result()
            if images is None:
                # Initialise one buffer per stream, based on the first acquisition
                images = [[] for _ in das]

            for im, da in zip(images, das):
                im.append(da)

            if f.cancelled():
                raise CancelledError()

            # Execute an action to prepare the next acquisition for the ith acquisition
            self.stepAcquisition(i, images)

        f.set_result(None)  # Indicate it's over

        # Construct a cube from each stream's image.
        images = self.postProcessing(images)

        # Export image
        exporter = dataio.find_fittest_converter(self.filename.value)
        exporter.export(self.filename.value, images)
        completed = True
        dlg.Close()
    except CancelledError:
        logging.debug("Acquisition cancelled.")
        dlg.resumeSettings()
    except Exception as e:
        logging.exception(e)
    finally:
        # Do completion actions
        self.completeAcquisition(completed)
def test_sync_path_guess(self):
    """
    try synchronized acquisition using the Optical Path Manager
    """
    # Create the streams and streamTree
    opmngr = path.OpticalPathManager(self.microscope)
    semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
    sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
    ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam, opm=opmngr)
    semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
    specs = stream.SpectrumSettingsStream("test spec", self.spec, self.spec.data,
                                          self.ebeam, opm=opmngr)
    sps = stream.SEMSpectrumMDStream("test sem-spec", [sems, specs])
    st = stream.StreamTree(streams=[semsur, semars, sps])

    # SEM survey settings are via the current hardware settings
    self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

    # SEM/AR/SPEC settings are via the AR stream
    ars.roi.value = (0.1, 0.1, 0.8, 0.8)
    specs.roi.value = (0.2, 0.2, 0.7, 0.7)
    mx_brng = self.ccd.binning.range[1]
    binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
    self.ccd.binning.value = binning
    self.ccd.exposureTime.value = 1  # s
    ars.repetition.value = (2, 3)
    specs.repetition.value = (3, 2)
    num_ar = numpy.prod(ars.repetition.value)

    est_time = acqmng.estimateTime(st.getProjections())

    # prepare callbacks
    self.start = None
    self.end = None
    self.updates = 0
    self.done = 0

    # Run acquisition
    start = time.time()
    f = acqmng.acquire(st.getProjections())
    f.add_update_callback(self.on_progress_update)
    f.add_done_callback(self.on_done)

    data, e = f.result()
    dur = time.time() - start
    self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
    self.assertIsInstance(data[0], model.DataArray)
    self.assertIsNone(e)
    self.assertEqual(len(data), num_ar + 4)

    thumb = acqmng.computeThumbnail(st, f)
    self.assertIsInstance(thumb, model.DataArray)

    self.assertGreaterEqual(self.updates, 1)  # at least one update at end
    self.assertLessEqual(self.end, time.time())
    self.assertTrue(not f.cancelled())

    # assert optical path configuration
    exp_pos = path.SPARC_MODES["spectral"][1]
    self.assertEqual(self.lenswitch.position.value, exp_pos["lens-switch"])
    self.assertEqual(self.spec_det_sel.position.value, exp_pos["spec-det-selector"])
    self.assertEqual(self.ar_spec_sel.position.value, exp_pos["ar-spec-selector"])

    time.sleep(0.1)
    self.assertEqual(self.done, 1)
def test_leech(self):
    """
    try acquisition with leech
    """
    # Create the streams and streamTree
    semsur = stream.SEMStream("test sem", self.sed, self.sed.data, self.ebeam)
    sems = stream.SEMStream("test sem cl", self.sed, self.sed.data, self.ebeam)
    ars = stream.ARSettingsStream("test ar", self.ccd, self.ccd.data, self.ebeam)
    semars = stream.SEMARMDStream("test SEM/AR", [sems, ars])
    st = stream.StreamTree(streams=[semsur, semars])

    pcd = Fake0DDetector("test")
    pca = ProbeCurrentAcquirer(pcd)
    sems.leeches.append(pca)
    semsur.leeches.append(pca)

    # SEM survey settings are via the current hardware settings
    self.ebeam.dwellTime.value = self.ebeam.dwellTime.range[0]

    # SEM/AR settings are via the AR stream
    ars.roi.value = (0.1, 0.1, 0.8, 0.8)
    mx_brng = self.ccd.binning.range[1]
    binning = tuple(min(4, mx) for mx in mx_brng)  # try binning 4x4
    self.ccd.binning.value = binning
    self.ccd.exposureTime.value = 1  # s
    ars.repetition.value = (2, 3)
    num_ar = numpy.prod(ars.repetition.value)

    pca.period.value = 10  # Only at beginning and end

    est_time = acqmng.estimateTime(st.getProjections())

    # prepare callbacks
    self.start = None
    self.end = None
    self.updates = 0
    self.done = 0

    # Run acquisition
    start = time.time()
    f = acqmng.acquire(st.getProjections())
    f.add_update_callback(self.on_progress_update)
    f.add_done_callback(self.on_done)

    data, e = f.result()
    dur = time.time() - start
    self.assertGreaterEqual(dur, est_time / 2)  # Estimated time shouldn't be too small
    self.assertIsInstance(data[0], model.DataArray)
    self.assertIsNone(e)
    self.assertEqual(len(data), num_ar + 2)

    thumb = acqmng.computeThumbnail(st, f)
    self.assertIsInstance(thumb, model.DataArray)

    self.assertGreaterEqual(self.updates, 1)  # at least one update at end
    self.assertLessEqual(self.end, time.time())
    self.assertTrue(not f.cancelled())

    time.sleep(0.1)
    self.assertEqual(self.done, 1)

    for da in data:
        pcmd = da.metadata[model.MD_EBEAM_CURRENT_TIME]
        self.assertEqual(len(pcmd), 2)