Example #1
    def _doWholeAcquisition(self, electron_coordinates, scale):
        """
        Perform acquisition with one optical image for all the spots.
        It's faster, but it's harder to separate the spots.
        """
        escan = self.escan
        ccd = self.ccd
        detector = self.detector
        dwell_time = self.dwell_time

        # order matters
        escan.scale.value = scale
        escan.resolution.value = self.repetitions
        escan.translation.value = (0, 0)

        # Scan at least 10 times, to avoid CCD/SEM synchronization problems
        sem_dt = escan.dwellTime.clip(dwell_time / 10)
        escan.dwellTime.value = sem_dt
        # For safety, ensure the exposure time is at least twice the time for a whole scan
        if dwell_time < 2 * sem_dt:
            dwell_time = 2 * sem_dt
            logging.info(
                "Increasing dwell time to %g s to avoid synchronization problems",
                dwell_time)

        # CCD setup
        ccd.binning.value = (1, 1)
        ccd.resolution.value = ccd.shape[0:2]
        et = numpy.prod(self.repetitions) * dwell_time
        ccd.exposureTime.value = et  # s
        readout = numpy.prod(ccd.resolution.value) / ccd.readoutRate.value
        tot_time = et + readout + 0.05

        try:
            if self._acq_state == CANCELLED:
                raise CancelledError()

            if self.bgsub:
                self.bg_image = ccd.data.get(asap=False)
            detector.data.subscribe(self._discard_data)
            self._min_acq_time = time.time()
            ccd.data.subscribe(self._onCCDImage)
            logging.debug("Scanning spot grid...")

            # Wait for CCD to capture the image
            if not self._ccd_done.wait(2 * tot_time + 4):
                raise TimeoutError("Acquisition of CCD timed out")

            with self._acq_lock:
                if self._acq_state == CANCELLED:
                    raise CancelledError()
                logging.debug("Scan done.")
                self._acq_state = FINISHED
        finally:
            detector.data.unsubscribe(self._discard_data)
            ccd.data.unsubscribe(self._onCCDImage)

        return self._optical_image, electron_coordinates, scale
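
The snippet above depends on two dataflow callbacks and an event that are not shown. A plausible minimal sketch, assuming the usual Odemis dataflow-callback signature (dataflow, data) and a threading.Event created in __init__ (the method bodies below are an assumption, not the actual implementation):

import threading
import time

from odemis import model

# In __init__ of the same class:
#     self._ccd_done = threading.Event()

def _discard_data(self, df, data):
    """Receive the SEM frames just to keep the e-beam scanning; drop the content."""
    pass

def _onCCDImage(self, df, data):
    """Store the optical image covering all the spots and wake up the caller."""
    # Skip images whose exposure started before we subscribed (cf. _min_acq_time)
    if data.metadata.get(model.MD_ACQ_DATE, time.time()) < self._min_acq_time:
        return
    df.unsubscribe(self._onCCDImage)
    self._optical_image = data
    self._ccd_done.set()
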
Example #2
def _DoAcquisition(future, escan, ccd, detector, light):
    _sem_done.clear()

    try:
        if future._acq_state == CANCELLED:
            raise CancelledError()

        logging.debug("Acquiring CCD images...")

        # Turn on light for CCD acquisition
        intensities = [1, 0, 0, 0, 0, 0, 0]
        light.power.value = [
            ints * pw for ints, pw in zip(intensities, light.power.range[1])
        ]

        optical_image_1 = ccd.data.get()

        intensities = [0, 1, 0, 0, 0, 0, 0]
        light.power.value = [
            ints * pw for ints, pw in zip(intensities, light.power.range[1])
        ]

        optical_image_2 = ccd.data.get()

        intensities = [0, 0, 1, 0, 0, 0, 0]
        light.power.value = [
            ints * pw for ints, pw in zip(intensities, light.power.range[1])
        ]

        optical_image_3 = ccd.data.get()

        with _acq_lock:
            if future._acq_state == CANCELLED:
                raise CancelledError()
            logging.debug("Acquisition done.")
            future._acq_state = FINISHED

        # Turn off light after the CCD acquisition
        light.power.value = light.power.range[0]

        logging.debug("Acquiring SEM image...")

        detector.data.subscribe(_ssOnSEMImage)
        # Wait for SEM to capture the image
        if not _sem_done.wait(2 * numpy.prod(escan.resolution.value) *
                              escan.dwellTime.value + 4):
            raise TimeoutError("Acquisition of SEM timed out")

        detector.data.unsubscribe(_ssOnSEMImage)

    finally:
        detector.data.unsubscribe(_ssOnSEMImage)

    return optical_image_1, optical_image_2, optical_image_3, detector.data._electron_image
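
The three nearly identical blocks that switch on one light source and grab an image could be collapsed into a loop; a sketch of the same behaviour (assuming the same light and ccd objects, and omitting the cancellation checks for brevity):

optical_images = []
max_power = light.power.range[1]
for src in range(3):  # one optical image per light source
    # Turn on only the source of interest, at full power
    light.power.value = [pw if i == src else 0 for i, pw in enumerate(max_power)]
    optical_images.append(ccd.data.get())
optical_image_1, optical_image_2, optical_image_3 = optical_images
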
Example #3
def _doTurnOnLight(f, bl, ccd):
    try:
        # We need the light to be off, so that we can notice a difference when
        # it turns on.
        # In case it's already turned on, just assume everything is fine.
        if bl.emissions.value[0] * bl.power.value != 0:
            logging.debug("The light is already on")
            return
        if f._task_state == CANCELLED:
            raise CancelledError()

        # With the light off, if the image is indeed all "black", the average
        # intensity should roughly correspond to the maximum noise level.
        img_light_off = ccd.data.get(asap=False)
        avg_intensity_off = numpy.average(img_light_off)
        # Intensity which is for sure not caused by noise: 150% of the noise level, plus a small offset
        intensity_min_on = avg_intensity_off * 1.5 + 0.1
        logging.debug("Looking for intensity of %s in an %s image",
                      intensity_min_on, img_light_off.shape)
        # Turn the light on, full power!
        bl.power.value = bl.power.range[1]
        bl.emissions.value = [1] * len(bl.emissions.value)
        while True:
            img2 = ccd.data.get()
            try:
                new_img = img.Subtract(img2, img_light_off)
            except ValueError:  # could happen if CCD changed resolution
                new_img = img2 - avg_intensity_off
            if f._task_state == CANCELLED:
                raise CancelledError()
            # number of pixels with higher intensity than the avg minimum
            pixels_high_intensity = numpy.sum(new_img > intensity_min_on)
            # the fraction of pixels with intensity higher than the minimum
            a = pixels_high_intensity / new_img.size
            # check whether this fraction is larger than 0.5%, which indicates that the light is on
            if a > 0.005:
                logging.debug("Detected light on (%f %% pixels above %f)",
                              a * 100, intensity_min_on)
                break
            logging.debug("No light detected (%f %% pixels above %f)", a * 100,
                          intensity_min_on)

    except CancelledError:
        raise  # Just don't log the exception
    except Exception:
        logging.exception("Failure while turning on light %s", bl.name)
        raise
    finally:
        with f._task_lock:
            if f._task_state == CANCELLED:
                raise CancelledError()
            f._task_state = FINISHED
Example #4
    def _on_future_done(self, f):
        """
        Called whenever a single sub-future is finished.
        If all sub-futures are finished, the result on the batch future will be set (None).
        If an exception occurred during the execution of the sub-future or the sub-future was cancelled,
        the exception/cancellation will be propagated towards the batch future and handled there.

        :param f: (ProgressiveFuture) A single sub-future.
        """
        # set the progress for the batch future (all futures not done yet)
        self.set_progress(end=self._estimate_end())

        # Set exception if future failed and cancel all other sub-futures
        try:
            ex = f.exception()  # raises CancelledError if cancelled, otherwise returns error
            if ex:
                self.cancel()
                self.set_exception(ex)
                return
        except CancelledError:
            if self.cancel():  # if cancelling works
                return
            else:
                self.set_exception(CancelledError())  # if cancelling fails
                return

        # If everything is fine:
        # Set result if all futures are done
        if all(sf.done() for sf in self.futures):
            # always return None, it's not clear what the return value of a batch of tasks should be
            # alternative would be the return value of the last task, but that is also ambiguous because
            # we don't require the futures to be carried out sequentially
            self.set_result(None)
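
For context, a batch future like this one would typically register _on_future_done on every sub-future via the standard concurrent.futures callback mechanism; a sketch (the helper name is hypothetical, self.futures is the list of sub-futures used above):

def _wire_sub_futures(self):
    # Called once all sub-futures have been created; each one reports back when it finishes
    for sub_f in self.futures:
        sub_f.add_done_callback(self._on_future_done)
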
Example #5
 def x(self):
     """Block the main thead until future finish, return the future.result()."""
     with self._condition:
         result = None
         if not self.done():
             self._condition.wait(self._timeout)
         if not self.done():
             # timeout
             self.set_exception(TimeoutError())
         if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
             # cancelled
             result = CancelledError()
         elif self._state == FINISHED:
             # finished
             if self._exception:
                 result = self._exception
             else:
                 result = self._result
         if isinstance(result, Exception):
             if self.catch_exception:
                 result = FailureException(result)
                 return result
             else:
                 raise result
         return result
Example #6
    def _run(self):
        """
        To be called to start the acquisition in the stream; blocks until
        the task is finished.
        returns (list of DataArray): acquisition data
        raises CancelledError if the acquisition was cancelled
        """
        estt = self._stream.estimateAcquisitionTime()

        # call prepare explicitly just to make sure that the preparation is
        # already done once we start waiting for the acquisition
        f = self._stream.prepare()
        f.result()

        # start stream
        self._startt = time.time()
        self._stream.image.subscribe(self._image_listener)
        # TODO: if exception during activation, it will not be passed here
        # as the VA will just log it. => change _onActive to be a setter, or
        # check also the .status VA.
        self._stream.is_active.value = True

        # wait until one image acquired or cancelled
        if not self._acq_over.wait(10 * estt + 5):
            raise IOError("Acquisition of stream %s timed out after %f s" %
                          (self._stream.name.value, 10 * estt + 5))

        with self._condition:
            if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
                raise CancelledError()

        return self._stream.raw  # the acquisition data
Example #7
    def _acquire(self, dlg, future):
        # Stop the streams
        dlg.streambar_controller.pauseStreams()

        # Acquire (even if it was live, to be sure the data is up-to-date)
        ss = self._get_acq_streams()
        dur = acqmng.estimateTime(ss)
        startt = time.time()
        future._cur_f = InstantaneousFuture()
        future.task_canceller = self._acq_canceller
        future.set_running_or_notify_cancel()  # Indicate the work is starting now
        future.set_progress(end=startt + dur)
        dlg.showProgress(future)

        future._cur_f = acqmng.acquire(ss,
                                       self.main_app.main_data.settings_obs)
        das, e = future._cur_f.result()
        if future.cancelled():
            raise CancelledError()

        if e:
            raise e

        return das
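
The snippet sets future.task_canceller = self._acq_canceller but the canceller itself is not shown; a minimal sketch of what it could do, an assumption based on how future._cur_f is used above (not the actual implementation):

def _acq_canceller(self, future):
    """Cancel the sub-acquisition currently running on behalf of the dialog future."""
    # Cancelling the current sub-future makes future._cur_f.result() return/raise,
    # which _acquire() then turns into a CancelledError.
    return future._cur_f.cancel()
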
Example #8
    def run(self):
        """
        Runs the acquisition
        """
        assert (self._current_stream is None)  # Task should be used only once
        expected_time = sum(self._streamTimes.values())
        # no need to set the start time of the future: it's automatically done
        # when setting its state to running.
        self._future.set_end_time(time.time() + expected_time)

        raw_images = {}  # stream -> list of raw images
        for s in self._streams:
            # Get the future of the acquisition, depending on the Stream type
            if hasattr(s, "acquire"):
                f = s.acquire()
            else:  # fall-back to old style stream
                f = _futures.wrapSimpleStreamIntoFuture(s)
            self._current_future = f
            self._current_stream = s
            self._streams_left.discard(s)

            # in case acquisition was cancelled, before the future was set
            if self._cancelled:
                f.cancel()
                raise CancelledError()

            # If it's a ProgressiveFuture, listen to the time update
            try:
                f.add_update_callback(self._on_progress_update)
            except AttributeError:
                pass  # not a ProgressiveFuture, fine

            # Wait for the acquisition to be finished.
            # Will pass down exceptions, including when it's cancelled
            raw_images[s] = f.result()

            # update the time left
            expected_time -= self._streamTimes[s]
            self._future.set_end_time(time.time() + expected_time)

        # TODO: if the stream is OverlayStream, apply the metadata to all the
        # data from an optical stream. => put the data
        self._adjust_metadata(raw_images)

        # return all the raw data as one large array
        ret = []
        for v in raw_images.values():
            ret.extend(v)
        return ret
Example #9
 def _cancellable_task(self, future, dur=0):
     """
     Fake task: just waits for the requested duration, unless cancelled.
     future: the future controlling the task (provides the _must_stop event)
     dur (float): time to wait
     return (float): dur
     """
     now = time.time()
     end = now + dur
     while now < end:
         left = end - now
         ms = future._must_stop.wait(max(0, left))
         if ms:
             raise CancelledError()
         now = time.time()
     return dur
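
The fake task polls future._must_stop, so cancelling it simply means setting that event; a self-contained sketch of the pattern (plain threading, not the project's future classes):

import threading
from concurrent.futures import CancelledError

class _FakeFuture:
    """Stand-in carrying only the attribute _cancellable_task needs."""
    def __init__(self):
        self._must_stop = threading.Event()

def cancel(future):
    # Ask the running task to stop; _cancellable_task will then raise CancelledError
    future._must_stop.set()
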
Example #10
    def _run(self):
        """
        To be called to start the acquisition in the stream; blocks until
        the task is finished.
        returns (list of DataArray): acquisition data
        raises CancelledError if the acquisition was cancelled
        """
        estt = self._stream.estimateAcquisitionTime()

        # The standard is_active acquisition doesn't care about the leeches,
        # so when imitating .acquire(), we need to call the leeches too.
        # We acquire just a single time, which we state by a shape of (1).
        leech_np = []
        for l in self._stream.leeches:
            np = l.start(estt, (1,))
            leech_np.append(np)

        # call prepare explicitly just to make sure that the preparation is
        # already done once we start waiting for the acquisition
        f = self._stream.prepare()
        f.result()

        # start stream
        self._startt = time.time()
        self._stream.image.subscribe(self._image_listener)
        # TODO: if exception during activation, it will not be passed here
        # as the VA will just log it. => change _onActive to be a setter, or
        # check also the .status VA.
        self._stream.is_active.value = True

        # wait until one image acquired or cancelled
        if not self._acq_over.wait(10 * estt + 5):
            raise IOError("Acquisition of stream %s timed out after %f s" %
                          (self._stream.name.value, 10 * estt + 5))

        with self._condition:
            if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
                raise CancelledError()

        # Call leeches for both first pixel acquired and end of acquisition
        for l, np in zip(self._stream.leeches, leech_np):
            if np is not None:
                l.next(self._stream.raw)
            l.complete(self._stream.raw)

        return self._stream.raw # the acquisition data
Example #11
    def _run(self):
        """
        To be called to start the acquisition in the stream; blocks until
        the task is finished.
        returns (list of DataArray): acquisition data
        raises CancelledError if the acquisition was cancelled
        """
        # start stream
        self._stream.image.subscribe(self._image_listener)
        self._stream.is_active.value = True

        # TODO: timeout exception if too long (> 10 x estimated time)
        # wait until one image acquired or cancelled
        self._acq_over.wait()
        with self._condition:
            if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
                raise CancelledError()

        return self._stream.raw # the acquisition data
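
In Examples #6, #10 and #11 the waiting is done on self._acq_over, which is set by the image listener subscribed to stream.image; a plausible sketch of that listener (an assumption, not the actual code):

def _image_listener(self, image):
    """Called when a new image is available on the stream: stop it and wake up _run()."""
    # One image is enough: deactivate the stream and stop listening
    self._stream.is_active.value = False
    self._stream.image.unsubscribe(self._image_listener)
    self._acq_over.set()
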
Example #12
    def run(self):
        """
        Runs the tiled acquisition procedure
        returns:
            (list of DataArrays): the stitched data for each stream acquisition
        raise:
            CancelledError: if the acquisition is cancelled
            Exception: if it failed before any results were acquired
        """
        if not self._future:
            return
        self._future._task_state = RUNNING
        st_data = []
        try:
            # Acquire the needed tiles
            da_list = self._acquireTiles()
            # Move stage to original position
            sub_f = self._stage.moveAbs(self._starting_pos)
            sub_f.result()

            if not da_list or not da_list[0]:
                logging.warning(
                    "No stream acquired that can be used for stitching.")
            else:
                logging.info("Acquisition completed, now stitching...")
                # Stitch the acquired tiles
                self._future.set_progress(end=self.estimateTime(0) + time.time())
                st_data = self._stitchTiles(da_list)

            if self._future._task_state == CANCELLED:
                raise CancelledError()
        except CancelledError:
            logging.debug("Acquisition cancelled")
            raise
        except Exception:
            logging.exception("Acquisition failed.")
            self._future.running_subf.cancel()
            raise
        finally:
            logging.info("Tiled acquisition ended")
            self._stage.moveAbs(self._starting_pos)
            with self._future._task_lock:
                self._future._task_state = FINISHED
        return st_data
Example #13
    def _waitEndMove(self, future, axes, end=0):
        """
        Wait until all the given axes are finished moving, or a request to 
        stop has been received.
        future (Future): the future it handles
        axes (set of int): the axes IDs to check
        end (float): expected end time
        raise:
            CancelledError: if cancelled before the end of the move
        """
        moving_axes = set(axes)

        last_upd = time.time()
        last_axes = moving_axes.copy()
        try:
            while not future._must_stop.is_set():
                for aid in moving_axes.copy(): # need copy to remove during iteration
                    if self._isOnTarget(aid):
                        moving_axes.discard(aid)
                if not moving_axes:
                    # no more axes to wait for
                    return

                # Update the position from time to time (10 Hz)
                if time.time() - last_upd > 0.1 or last_axes != moving_axes:
                    last_names = set(self._axes_names[i] for i in last_axes)
                    self._updatePosition(last_names)
                    last_upd = time.time()
                    last_axes = moving_axes.copy()

                # Wait half of the time left (maximum 0.1 s)
                left = end - time.time()
                sleept = max(0, min(left / 2, 0.1))
                future._must_stop.wait(sleept)

            logging.debug("Move of axes %s cancelled before the end", axes)
            # stop all axes that are still moving
            for i in moving_axes:
                self.MotorStop(i)
            future._was_stopped = True
            raise CancelledError()
        finally:
            self._updatePosition() # update (all axes) with final position
Example #14
    def cancel(self, msg='', mute=False):
        '''pep-3156_ API method: it cancels the deferred and schedules callbacks.
If the deferred is waiting for another :class:`Deferred`, forward the
cancellation to that one. If the :class:`Deferred` is already :meth:`done`,
it does nothing.

:param msg: Optional message to pass when the :class:`CancelledError`
    is initialised.'''
        if not self.done():
            if self._canceller:
                self._canceller(self)
            else:
                self._suppressAlreadyCalled = True
            if not self.done():
                self.callback(CancelledError(msg))
                if mute and isinstance(self.result, Failure):
                    self.result.mute()
        elif isinstance(self.result, Deferred):
            return self.result.cancel(msg, mute)
Example #15
    def _acquireTile(self, i, ix, iy):
        """
        Calls acquire function and blocks until the data is returned.
        :return (list of DataArrays): list of acquired das for the current tile
        """
        # Update the progress bar
        self._future.set_progress(
            end=self.estimateTime((self._nx * self._ny) - i) + time.time())

        self._future.running_subf = acqmng.acquire(self._streams,
                                                   self._settings_obs)
        # blocks until all the acquisitions are finished
        das, e = self._future.running_subf.result()
        if e:
            logging.warning("Acquisition for tile %dx%d partially failed: %s",
                            ix, iy, e)

        if self._future._task_state == CANCELLED:
            raise CancelledError()
        return das
Example #16
    def _result(self, raiseit=True):
        try:
            d = self.executor.futures[self.key]
        except KeyError:
            exception = CancelledError(self.key)
            if raiseit:
                raise exception
            else:
                raise gen.Return(exception)

        yield d['event'].wait()
        if self.status == 'error':
            exception = d['exception']
            traceback = d['traceback']
            if raiseit:
                six.reraise(type(exception), exception, traceback)
            else:
                raise gen.Return([type(exception), exception, traceback])
        else:
            result = yield self.executor._gather([self])
            raise gen.Return(result[0])
Example #17
 def _adjustFocus(self, das, i, ix, iy):
     if i % SKIP_TILES != 0:
         logging.debug("Skipping focus adjustment..")
         return das
     try:
         current_focus_level = MeasureOpticalFocus(das[self._streams.index(
             self._focus_stream)])
     except IndexError:
         logging.warning("Failed to get image to measure focus on.")
         return das
     if i == 0:
         # Use initial optical focus level to be compared to next tiles
         # TODO: instead of using the first image, use the best 10% images (excluding outliers)
         self._good_focus_level = current_focus_level
     # Run autofocus if current focus got worse than permitted deviation
     if abs(current_focus_level - self._good_focus_level) / self._good_focus_level > FOCUS_FIDELITY:
         try:
             self._future.running_subf = AutoFocus(
                 self._focus_stream.detector,
                 self._focus_stream.emitter,
                 self._focus_stream.focuser,
                 good_focus=self._good_focus,
                 rng_focus=self._focus_rng,
                 method=MTD_EXHAUSTIVE)
             # blocks until autofocus is finished
             self._future.running_subf.result()
             if self._future._task_state == CANCELLED:
                 raise CancelledError()
         except CancelledError:
             raise
         except Exception:
             logging.exception("Running autofocus failed on image i=%s.", i)
         else:
             # Reacquire the out of focus tile (which should be corrected now)
             das = self._acquireTile(i, ix, iy)
     return das
Example #18
    def _runAcquisition(self, future):
        # number of drift corrections per pixel
        nDC = self.nDC.value
        # Initialize spectrograph
        CENTERWL = self.centerWavelength.value
        SLIT_WIDTH = self.slitWidth.value
        # move to appropriate center wavelength
        self._sgr.moveAbs({"wavelength": CENTERWL}).result()
        # set slit width
        self._sgr.moveAbs({"slit-in": SLIT_WIDTH}).result()

        dt = self.dwellTime.value
        self._emitter.dwellTime.value = dt
        #exposure time and dwell time should be the same in this case
        bins = (self.binninghorz.value, self.binningvert.value)
        self._detector.binning.value = bins
        specresx = self._detector.shape[0] // bins[0]
        specresy = self._detector.shape[1] // bins[1]
        self._detector.resolution.value = (specresx, specresy)
        # semfov, physwidth = self._get_sem_fov()
        #xyps, stepsize = self._calc_xy_pos()
        xres, yres = self.get_scan_res()
        xyps = self.calc_xy_pos(self.roi.value, self.stepsize.value)
        logging.debug("Will scan on X/Y positions %s", xyps)

        #phys_rect = convert_roi_ratio_to_phys(escan,roi)
        measurement_n = 0
        ARdata = []
        sedata = []
        NPOS = len(xyps)  # = xres * yres
        self._save_hw_settings()

        # drift correction vectors
        dc_vect = (0, 0)
        # list instead of tuple, to allow changing just one item at a time
        tot_dc_vect = [0, 0]

        if self.dcRegion.value != UNDEFINED_ROI:
            drift_est = drift.AnchoredEstimator(self._emitter, self._sed,
                                                self.dcRegion.value,
                                                self.dcDwellTime.value)
            drift_est.acquire()
        else:
            drift_est = None

        try:
            if drift_est:
                self._start_spot(nDC)
                # re-adjust dwell time for number of drift corrections
                self._detector.exposureTime.value = dt / nDC
                self._emitter.dwellTime.value = dt / nDC

                for x, y in xyps:
                    sedatapix = []
                    sedatam = []
                    ARdatapix = []
                    ARdatam = []

                    for ll in range(self.nDC.value):
                        # add total drift vector at this point
                        xc = x - tot_dc_vect[0]
                        yc = y - tot_dc_vect[1]

                        # check if drift correction leads to an x,y position outside of scan region
                        cx, cy = self._emitter.translation.clip((xc, yc))
                        if (cx, cy) != (xc, yc):
                            logging.error(
                                "Drift of %s px caused acquisition region out "
                                "of bounds: needed to scan spot at %s.",
                                tot_dc_vect, (xc, yc))
                        xc, yc = (cx, cy)
                        xm, ym = self._convert_xy_pos_to_m(xc, yc)
                        logging.info(
                            "Acquiring scan number %d at position (%g, %g), with drift correction of %s",
                            ll + 1, xm, ym, tot_dc_vect)
                        startt = time.time()
                        # acquire at the drift-corrected position
                        ARdat, sedat = self._acquire_ARspec(
                            xc, yc, dt / nDC, future)
                        endt = time.time()
                        logging.debug("Took %g s (expected = %g s)",
                                      endt - startt, dt / nDC)
                        ARdatapix.append(ARdat)
                        sedatapix.append(sedat)
                        logging.debug("Memory used = %d bytes",
                                      udriver.readMemoryUsage())
                        drift_est.acquire()
                        dc_vect = drift_est.estimate()
                        tot_dc_vect[0] += dc_vect[0]
                        tot_dc_vect[1] += dc_vect[1]

                    measurement_n += 1
                    # TODO: update the future progress
                    logging.info("Acquired %d out of %d pixels", measurement_n,
                                 NPOS)

                    # Perform addition of measurements here which keeps other
                    # acquisitions the same and reduces memory required. We use 32 bits in this case as the data is 16 bits.
                    ARdatam = numpy.sum(ARdatapix, 0, dtype=numpy.float32)
                    # check whether the data value exceeds the data-type range
                    # Note: this works for integers only; for floats there is a separate numpy function
                    idt = numpy.iinfo(ARdatapix[0].dtype)
                    # we could handle this in several ways; for now we just clip the signal
                    ARdatam = numpy.clip(ARdatam, idt.min, idt.max)
                    # convert back to right datatype and (re)add metadata
                    ARdatam = model.DataArray(
                        ARdatam.astype(ARdatapix[0].dtype),
                        ARdatapix[0].metadata)
                    ARdata.append(ARdatam)

                    # For SE data just use mean because absolute scale is not relevant
                    sedatam = numpy.mean(sedatapix).astype(sedatapix[0].dtype)
                    # The brackets are required to give enough dimensions to make the rest happy
                    sedatam = model.DataArray([[[[sedatam]]]],
                                              sedatapix[0].metadata)
                    sedata.append(sedatam)

            else:
                self._start_spot(1)
                for x, y in xyps:
                    self._detector.exposureTime.value = dt
                    xm, ym = self._convert_xy_pos_to_m(x, y)
                    logging.info("Acquiring at position (%g, %g)", xm, ym)
                    startt = time.time()
                    # the dwell time is passed explicitly because it differs between the drift-corrected and uncorrected cases
                    ARdat, sedat = self._acquire_ARspec(
                        x, y, self.dwellTime.value, future)
                    endt = time.time()
                    logging.debug("Took %g s (expected = %g s)", endt - startt,
                                  self.dwellTime.value)
                    ARdata.append(ARdat)
                    sedata.append(sedat)
                    logging.debug("Memory used = %d bytes",
                                  udriver.readMemoryUsage())
                    # number of scans that have been done. Could be printed to show progress
                    measurement_n += 1
                    # TODO: update the future progress
                    logging.info("Acquired %d out of %d pixels", measurement_n,
                                 NPOS)

            self._stop_spot()
            stepsize = (self.stepsize.value, self.stepsize.value)
            ARdata[0].metadata[model.MD_POS] = sedata[0].metadata[model.MD_POS]
            full_ARdata = self._assemble_ARspectral_data(
                ARdata, (xres, yres), self.roi.value, stepsize, bins, specresx)
            full_sedata = self._assemble_sed_data(sedata, (xres, yres),
                                                  self.roi.value, stepsize)

            if future._acq_state == CANCELLED:
                raise CancelledError()
            das = [full_ARdata, full_sedata]
            if drift_est:
                das.append(self._assembleAnchorData(drift_est.raw))

            return das

        except CancelledError:
            logging.info("AR spectral stream cancelled")
            self._stop_spot()
            with future._acq_lock:
                self._acq_state = FINISHED
            raise  # Just don't log the exception
        except Exception:
            logging.exception("Failure during AR spectral acquisition")
            raise
        finally:
            logging.debug("AR spectral acquisition finished")
            self._sed.data.unsubscribe(self._receive_sem_data)
            future._acq_done.set()
            self._resume_hw_settings()
Example #19
def _DoCenterSpot(future, ccd, stage, escan, mx_steps, type, dfbkg):
    """
    Iteratively acquires an optical image, finds the coordinates of the spot
    (center) and moves the stage to this position. Repeats until the found
    coordinates are at the center of the optical image or a maximum number of
    steps is reached.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    ccd (model.DigitalCamera): The CCD
    stage (model.Actuator): The stage
    escan (model.Emitter): The e-beam scanner
    mx_steps (int): Maximum number of steps to reach the center
    type (*_MOVE or BEAM_SHIFT): Type of move in order to align
    dfbkg (model.DataFlow or None): If provided, will be used to start/stop
     the e-beam emission (it must be the dataflow of se- or bs-detector) in
     order to do background subtraction. If None, no background subtraction is
     performed.
    returns (float or None):    Final distance to the center (m)
            (2 floats): vector to the spot from the center (m, m)
    raises:
            CancelledError() if cancelled
    """
    try:
        logging.debug("Aligning spot...")
        steps = 0
        # Stop once spot is found on the center of the optical image
        dist = None
        while True:
            if future._spot_center_state == CANCELLED:
                raise CancelledError()
            # Or once max number of steps is reached
            if steps >= mx_steps:
                break

            # Wait to make sure no previous spot is detected
            image = AcquireNoBackground(ccd, dfbkg)
            try:
                spot_pxs = FindSpot(image)
            except LookupError:
                return None, None

            # Center of optical image
            pixelSize = image.metadata[model.MD_PIXEL_SIZE]
            center_pxs = (image.shape[1] / 2, image.shape[0] / 2)
            # Epsilon distance below which the lens is considered centered. The larger of:
            # * 1.5 pixels (because the CCD resolution cannot give us better)
            # * 1 µm (because that's the best resolution of our actuators)
            err_mrg = max(1.5 * pixelSize[0], 1e-06)  # m

            tab_pxs = [a - b for a, b in zip(spot_pxs, center_pxs)]
            tab = (tab_pxs[0] * pixelSize[0], tab_pxs[1] * pixelSize[1])
            logging.debug("Found spot @ %s px", spot_pxs)
            dist = math.hypot(*tab)
            # If we are already there, stop
            if dist <= err_mrg:
                break

            # Move to the found spot
            if type == OBJECTIVE_MOVE:
                f = stage.moveRel({"x": tab[0], "y":-tab[1]})
                f.result()
            elif type == STAGE_MOVE:
                f = stage.moveRel({"x":-tab[0], "y": tab[1]})
                f.result()
            else:
                escan.translation.value = (-tab_pxs[0], -tab_pxs[1])
            steps += 1
            # Update progress of the future
            future.set_progress(end=time.time() +
                                estimateCenterTime(ccd.exposureTime.value, dist))

        return dist, tab
    finally:
        with future._center_lock:
            if future._spot_center_state == CANCELLED:
                raise CancelledError()
            future._spot_center_state = FINISHED
Example #20
def _DoAlignSpot(future, ccd, stage, escan, focus, type, dfbkg, rng_f):
    """
    Adjusts settings until we have a clear and well focused optical spot image,
    detects the spot and manipulates the stage so as to move the spot center to
    the optical image center. If no spot alignment is achieved an exception is
    raised.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    ccd (model.DigitalCamera): The CCD
    stage (model.Actuator): The stage
    escan (model.Emitter): The e-beam scanner
    focus (model.Actuator): The optical focus
    type (string): Type of move in order to align
    dfbkg (model.DataFlow): dataflow of se- or bs- detector
    rng_f (tuple of floats): range to apply Autofocus on if needed
    returns (float):    Final distance to the center (m)
            (2 floats): vector to the spot from the center (m, m)
    raises:
            CancelledError() if cancelled
            IOError
    """
    init_binning = ccd.binning.value
    init_et = ccd.exposureTime.value
    init_cres = ccd.resolution.value
    init_scale = escan.scale.value
    init_eres = escan.resolution.value

    # TODO: allow to pass the precision as argument. As for the Delphi, we don't
    # need such accuracy on the alignment (as it's just for twin stage calibration).

    # TODO: take logpath as argument, to store images later on

    logging.debug("Starting Spot alignment...")
    try:
        if future._task_state == CANCELLED:
            raise CancelledError()

        # Configure CCD and set ebeam to spot mode
        logging.debug("Configure CCD and set ebeam to spot mode...")
        ccd.binning.value = ccd.binning.clip((2, 2))
        ccd.resolution.value = ccd.resolution.range[1]
        ccd.exposureTime.value = 0.3
        escan.scale.value = (1, 1)
        escan.resolution.value = (1, 1)

        if future._task_state == CANCELLED:
            raise CancelledError()
        logging.debug("Adjust exposure time...")
        if dfbkg is None:
            # Long exposure time to compensate for no background subtraction
            ccd.exposureTime.value = 1.1
        else:
            # TODO: all this code to decide whether to pick exposure 0.3 or 1.5?
            # => KISS! Use always 1s... or allow up to 5s?
            # Estimate noise and adjust exposure time based on "Rose criterion"
            image = AcquireNoBackground(ccd, dfbkg)
            snr = MeasureSNR(image)
            while snr < 5 and ccd.exposureTime.value < 1.5:
                ccd.exposureTime.value = ccd.exposureTime.value + 0.2
                image = AcquireNoBackground(ccd, dfbkg)
                snr = MeasureSNR(image)
            logging.debug("Using exposure time of %g s", ccd.exposureTime.value)

        hqet = ccd.exposureTime.value  # exposure time for high-quality (binning == 1x1)
        if ccd.binning.value == (2, 2):
            hqet *= 4  # To compensate for smaller binning

        logging.debug("Trying to find spot...")
        for i in range(3):
            if future._task_state == CANCELLED:
                raise CancelledError()

            if i == 0:
                future._centerspotf = CenterSpot(ccd, stage, escan, ROUGH_MOVE, type, dfbkg)
                dist, vector = future._centerspotf.result()
            elif i == 1:
                logging.debug("Spot not found, auto-focusing...")
                try:
                    # When running AutoFocus, set the binning to 8 if possible, and use
                    # the exhaustive method to be sure not to miss the spot.
                    ccd.binning.value = ccd.binning.clip((8, 8))
                    future._autofocusf = autofocus.AutoFocus(ccd, None, focus, dfbkg, rng_focus=rng_f, method=MTD_EXHAUSTIVE)
                    lens_pos, fm_level = future._autofocusf.result()
                    # Update progress of the future
                    future.set_progress(end=time.time() +
                                        estimateAlignmentTime(hqet, dist, 1))
                except IOError as ex:
                    logging.error("Autofocus on spot image failed: %s", ex)
                    raise IOError('Spot alignment failure. AutoFocus failed.')
                logging.debug("Trying again to find spot...")
                future._centerspotf = CenterSpot(ccd, stage, escan, ROUGH_MOVE, type, dfbkg)
                dist, vector = future._centerspotf.result()
            elif i == 2:
                if dfbkg is not None:
                    # In some cases background subtraction goes wrong, and makes
                    # things worse, so try without.
                    logging.debug("Trying again to find spot, without background subtraction...")
                    dfbkg = None
                    future._centerspotf = CenterSpot(ccd, stage, escan, ROUGH_MOVE, type, dfbkg)
                    dist, vector = future._centerspotf.result()

            if dist is not None:
                break
        else:
            raise IOError('Spot alignment failure. Spot not found')

        ccd.binning.value = (1, 1)
        ccd.exposureTime.value = ccd.exposureTime.clip(hqet)

        # Update progress of the future
        future.set_progress(end=time.time() +
                            estimateAlignmentTime(hqet, dist, 1))
        logging.debug("After rough alignment, spot center is at %s m", vector)

        # Limit FoV to save time
        logging.debug("Cropping FoV...")
        CropFoV(ccd, dfbkg)
        if future._task_state == CANCELLED:
            raise CancelledError()

        # Update progress of the future
        future.set_progress(end=time.time() +
                            estimateAlignmentTime(hqet, dist, 0))

        # Center spot
        if future._task_state == CANCELLED:
            raise CancelledError()
        logging.debug("Aligning spot...")
        future._centerspotf = CenterSpot(ccd, stage, escan, FINE_MOVE, type, dfbkg)
        dist, vector = future._centerspotf.result()
        if dist is None:
            raise IOError('Spot alignment failure. Cannot reach the center.')
        logging.info("After fine alignment, spot center is at %s m", vector)
        return dist, vector
    finally:
        ccd.binning.value = init_binning
        ccd.exposureTime.value = init_et
        ccd.resolution.value = init_cres
        escan.scale.value = init_scale
        escan.resolution.value = init_eres
        with future._alignment_lock:
            future._done.set()
            if future._task_state == CANCELLED:
                raise CancelledError()
            future._task_state = FINISHED
Example #21
    def _runAcquisition(self, future):

        self._detector.pixelDuration.value = self.pixelDuration.value
        logging.debug("Syncoffset used %s", self.syncOffset.value)
        logging.debug("SyncDiv used %s", self.syncDiv.value)

        # number of drift corrections per pixel
        nDC = self.nDC.value
        # semfov, physwidth = self._get_sem_fov()
        #xyps, stepsize = self._calc_xy_pos()
        xres, yres = self.get_scan_res()
        xyps = self.calc_xy_pos(self.roi.value, self.stepsize.value)
        logging.debug("Will scan on X/Y positions %s", xyps)

        #phys_rect = convert_roi_ratio_to_phys(escan,roi)
        measurement_n = 0
        cordata = []
        sedata = []
        NPOS = len(xyps)  # = xres * yres

        self._save_hw_settings()

        # a list (instead of a tuple) for the summation to work on each element independently
        tot_dc_vect = [0, 0]

        #check whether a drift region is defined
        if self.dcRegion.value != UNDEFINED_ROI:
            drift_est = drift.AnchoredEstimator(self._emitter, self._sed,
                                                self.dcRegion.value,
                                                self.dcDwellTime.value)
            drift_est.acquire()
        else:
            drift_est = None

        try:
            if drift_est:
                self._start_spot(nDC)
                # re-adjust dwell time for number of drift corrections
                self._detector.dwellTime.value = self.dwellTime.value / nDC
                self._emitter.dwellTime.value = self.dwellTime.value / nDC

                for x, y in xyps:
                    sedatapix = []
                    cordatapix = []

                    for ll in range(self.nDC.value):
                        # add total drift vector at this point
                        xc = x - tot_dc_vect[0]
                        yc = y - tot_dc_vect[1]

                        # check if drift correction leads to an x,y position outside of scan region
                        cx, cy = self._emitter.translation.clip((xc, yc))
                        if (cx, cy) != (xc, yc):
                            logging.error(
                                "Drift of %s px caused acquisition region out "
                                "of bounds: needed to scan spot at %s.",
                                tot_dc_vect, (xc, yc))
                        xc, yc = (cx, cy)
                        xm, ym = self._convert_xy_pos_to_m(xc, yc)
                        logging.info(
                            "Acquiring scan number %d at position (%g, %g), with drift correction of %s",
                            ll + 1, xm, ym, tot_dc_vect)
                        startt = time.time()
                        cordat, sedat = self._acquire_correlator(
                            xc, yc, self.dwellTime.value / nDC, future)
                        endt = time.time()
                        logging.debug("Took %g s (expected = %g s)",
                                      endt - startt,
                                      self.dwellTime.value / nDC)
                        cordatapix.append(cordat)
                        sedatapix.append(sedat)
                        logging.debug("Memory used = %d bytes",
                                      udriver.readMemoryUsage())
                        drift_est.acquire()
                        # drift correction vectors
                        dc_vect = drift_est.estimate()
                        tot_dc_vect[0] += dc_vect[0]
                        tot_dc_vect[1] += dc_vect[1]

                    measurement_n += 1
                    # TODO: update the future progress
                    logging.info("Acquired %d out of %d pixels", measurement_n,
                                 NPOS)

                    # Perform addition of measurements here which keeps other
                    # acquisitions the same and reduces memory required.
                    cordatam = numpy.sum(cordatapix, 0, dtype=numpy.float64)
                    # check whether the data value exceeds the data-type range
                    # Note: this works for integers only; for floats there is a separate numpy function
                    idt = numpy.iinfo(cordatapix[0].dtype)
                    # we could handle this in several ways; for now we just clip the signal
                    cordatam = numpy.clip(cordatam, idt.min, idt.max)
                    # convert back to right datatype and (re)add metadata
                    cordatam = model.DataArray(
                        cordatam.astype(cordatapix[0].dtype),
                        cordatapix[0].metadata)
                    cordata.append(cordatam)

                    # For SE data just use mean because absolute scale is not relevant
                    sedatam = numpy.mean(sedatapix).astype(sedatapix[0].dtype)
                    # The brackets are required to give enough dimensions to make the rest happy
                    sedatam = model.DataArray([[[[sedatam]]]],
                                              sedatapix[0].metadata)
                    sedata.append(sedatam)

            else:
                self._start_spot(1)
                for x, y in xyps:
                    self._detector.dwellTime.value = self.dwellTime.value
                    xm, ym = self._convert_xy_pos_to_m(x, y)
                    logging.info("Acquiring at position (%g, %g)", xm, ym)
                    startt = time.time()
                    # the dwell time is passed explicitly because it differs between the drift-corrected and uncorrected cases
                    cordat, sedat = self._acquire_correlator(
                        x, y, self.dwellTime.value, future)
                    endt = time.time()
                    logging.debug("Took %g s (expected = %g s)", endt - startt,
                                  self.dwellTime.value)
                    cordata.append(cordat)
                    sedata.append(sedat)
                    logging.debug("Memory used = %d bytes",
                                  udriver.readMemoryUsage())
                    # number of scans that have been done. Could be printed to show progress
                    measurement_n += 1
                    # TODO: update the future progress
                    logging.info("Acquired %d out of %d pixels", measurement_n,
                                 NPOS)

            self._stop_spot()
            stepsize = (self.stepsize.value, self.stepsize.value)
            cordata[0].metadata[model.MD_POS] = sedata[0].metadata[
                model.MD_POS]
            full_cordata = self._assemble_correlator_data(
                cordata, (xres, yres), self.roi.value, stepsize)
            full_sedata = self._assemble_sed_data(sedata, (xres, yres),
                                                  self.roi.value, stepsize)

            if future._acq_state == CANCELLED:
                raise CancelledError()
            das = [full_cordata, full_sedata]
            if drift_est:
                das.append(self._assembleAnchorData(drift_est.raw))

            return das

        except CancelledError:
            logging.info("Time correlator stream cancelled")
            with future._acq_lock:
                self._acq_state = FINISHED
            raise  # Just don't log the exception
        except Exception:
            logging.exception("Failure during Correlator acquisition")
            raise
        finally:
            logging.debug("TC acquisition finished")
            # Make sure all detectors are stopped
            self._stop_spot()
            self._detector.data.unsubscribe(self._receive_tc_data)
            future._acq_done.set()
            self._resume_hw_settings()
Example #22
    def _acquire_correlator(self, x, y, dwellT, future):
        """
        Acquire N images from the correlator while having the e-beam at a spot position
        escan (model.Emitter): the e-beam scanner
        edet (model.Detector): any detector of the SEM
        correlator  the time correlator
        x, y (floats): spot position in the ebeam coordinates
        """

        # TODO: maybe it is better to move these commands out of this function and into the master because these parameters should not change
        self._move_spot(x, y)

        # get correlator data
        startt = time.time()
        #dat = self._detector.data.get()
        self._detector.data.subscribe(self._receive_tc_data)
        timeout = 1 + dwellT * 1.5
        if not self.tc_data_received.wait(timeout):
            if future._acq_state == CANCELLED:
                raise CancelledError()
            logging.warning("No time-correlator data received, will retry")
            self._detector.data.unsubscribe(self._receive_tc_data)
            time.sleep(0.1)
            self._detector.data.subscribe(self._receive_tc_data)
            if not self.tc_data_received.wait(timeout):
                raise IOError(
                    "No time-correlator data received twice in a row")
        if future._acq_state == CANCELLED:
            raise CancelledError()

        dat = self.tc_data
        dat.shape += (1, 1)

        dur_cor = time.time() - startt
        if dur_cor < dwellT * 0.99:
            logging.error(
                "Correlator data arrived after %g s, while expected at least %g s",
                dur_cor, dwellT)
        # wait for the SE data, in case it hasn't arrived yet
        if not self.sem_data_received.wait(3):
            logging.warning(
                "No SEM data received, 3s after the correlator data")
        if not self.sem_data_received.wait(dwellT):
            raise IOError("No SEM data received")
        self._pause_spot()

        if future._acq_state == CANCELLED:
            raise CancelledError()

        if len(self.sem_data) > 1:
            logging.info("Received %d SEM data, while expected just 1",
                         len(self.sem_data))

        sedat = self.sem_data[0]
        sedat.shape += (1, 1)

        # TODO: it might actually be better to just give the whole list, and
        # the exporter will take care of assembling the data, while keeping the
        # acquisition date correct for each image.

        # insert a new axis, for N

        # Make a DataArray with the metadata from the first point
        #full_data = model.DataArray(dat,metadata=md)

        return dat, sedat
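
self.tc_data and self.tc_data_received are filled in by the dataflow callback subscribed above; a sketch of what _receive_tc_data could look like, an assumption following the usual dataflow-callback pattern (not the actual code):

def _receive_tc_data(self, df, data):
    """Store one time-correlator frame and signal _acquire_correlator() that it arrived."""
    df.unsubscribe(self._receive_tc_data)
    self.tc_data = data
    self.tc_data_received.set()
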
Example #23
def _DoAutoFocus(future, detector, max_step, thres_factor, et, focus,
                 background, dataflow):
    """
    Iteratively acquires an optical image, measures its focus level and adjusts 
    the optical focus with respect to the focus level.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper 
    detector: model.DigitalCamera or model.Detector
    max_step: step used in case we are completely out of focus
    thres_factor: threshold factor depending on type of detector and binning
    et: exposure time if detector is a ccd, 
        dwellTime*prod(resolution) if detector is an SEM
    focus (model.Actuator): The optical focus
    background (boolean): If True, apply background subtraction
    dataflow (model.DataFlow): dataflow of se- or bs- detector
    returns (float):    Focus position #m
                        Focus level
    raises:    
            CancelledError if cancelled
            IOError if procedure failed
    """
    logging.debug("Starting Autofocus...")

    try:
        rng = focus.axes["z"].range

        for trial in range(MAX_BS_NUMBER):
            # Keep the initial focus position
            init_pos = focus.position.value.get('z')
            best_pos = init_pos
            step = max_step / 2
            cur_pos = focus.position.value.get('z')
            image = SubstractBackground(detector, dataflow)
            fm_cur = MeasureFocus(image)
            init_fm = fm_cur
            best_fm = init_fm
            #Clip within range
            new_pos = _ClippedMove(rng, focus, step)
            image = SubstractBackground(detector, dataflow)
            fm_test = MeasureFocus(image)
            if fm_test > best_fm:
                best_pos = new_pos
                best_fm = fm_test

            if future._autofocus_state == CANCELLED:
                raise CancelledError()
            cur_pos = focus.position.value.get('z')

            # Check if we are completely out of focus
            if abs(fm_cur - fm_test) < ((thres_factor / (trial + 1)) * fm_cur):
                logging.warning("Completely out of focus, retrying...")
                step = max_step
                fm_new = 0
                sign = 1
                factor = 1
                new_step = step
                cur_pos = focus.position.value.get('z')

                steps = 0
                while fm_new - fm_test < ((thres_factor /
                                           (trial + 1)) * 2) * fm_test:
                    if steps >= MAX_STEPS_NUMBER:
                        break
                    sign = -sign
                    cur_pos = cur_pos + sign * new_step
                    #if sign == 1:
                    factor += 1
                    new_step = factor * step
                    #if rng[0] <= cur_pos <= rng[1]:
                    pos = focus.position.value.get('z')
                    shift = cur_pos - pos
                    new_pos = _ClippedMove(rng, focus, shift)
                    image = SubstractBackground(detector, dataflow)
                    fm_new = MeasureFocus(image)
                    if fm_new > best_fm:
                        best_pos = new_pos
                        best_fm = fm_new
                    if future._autofocus_state == CANCELLED:
                        raise CancelledError()
                    steps += 1

                image = SubstractBackground(detector, dataflow)
                fm_cur = MeasureFocus(image)
                if fm_cur > best_fm:
                    best_pos = new_pos
                    best_fm = fm_cur
                new_pos = _ClippedMove(rng, focus, step)
                image = SubstractBackground(detector, dataflow)
                fm_test = MeasureFocus(image)
                if fm_test > best_fm:
                    best_pos = new_pos
                    best_fm = fm_test
                if future._autofocus_state == CANCELLED:
                    raise CancelledError()

            # Update progress of the future
            future.set_end_time(time.time() +
                                estimateAutoFocusTime(et, MAX_STEPS_NUMBER /
                                                      2))
            # Determine focus direction
            if fm_cur > fm_test:
                sign = -1
                new_pos = _ClippedMove(rng, focus, -step)
                if future._autofocus_state == CANCELLED:
                    raise CancelledError()
                fm_test = fm_cur
            else:
                sign = 1

            # Move the lens in the correct direction until focus measure is decreased
            step = max_step / 2
            fm_old, fm_new = fm_test, fm_test
            steps = 0
            while fm_old - fm_new <= (thres_factor / (trial + 1)) * fm_old:
                if steps >= MAX_STEPS_NUMBER:
                    break
                fm_old = fm_new
                before_move = focus.position.value.get('z')
                new_pos = _ClippedMove(rng, focus, sign * step)
                after_move = focus.position.value.get('z')
                # Do not get stuck at the border
                if before_move == after_move:
                    sign = -sign
                image = SubstractBackground(detector, dataflow)
                fm_new = MeasureFocus(image)
                if fm_new > best_fm:
                    best_pos = new_pos
                    best_fm = fm_new
                if future._autofocus_state == CANCELLED:
                    raise CancelledError()
                steps += 1

            # Binary search between the last 2 positions
            new_pos = _ClippedMove(rng, focus,
                                   sign * (step / (2 / (trial + 1))))
            max_step = max_step / 8

        if future._autofocus_state == CANCELLED:
            raise CancelledError()

        # Return to best measured focus position anyway
        pos = focus.position.value.get('z')
        shift = best_pos - pos
        new_pos = _ClippedMove(rng, focus, shift)
        return focus.position.value.get('z'), best_fm
    except CancelledError:
        pos = focus.position.value.get('z')
        shift = best_pos - pos
        new_pos = _ClippedMove(rng, focus, shift)
    finally:
        with future._autofocus_lock:
            if future._autofocus_state == CANCELLED:
                raise CancelledError()
            future._autofocus_state = FINISHED
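Note that the snippet above relies on a _ClippedMove(rng, focus, shift) helper that is not shown here. As a rough, hypothetical sketch only (the real helper may behave differently), it would clip the requested relative shift so the target stays inside the axis range, move synchronously, and return the position actually reached:

def _ClippedMove(rng, focus, shift):
    # Hypothetical sketch: move the 'z' axis by 'shift' (m), clipped so the
    # target stays within rng = (min, max); return the position reached.
    cur = focus.position.value["z"]
    target = max(rng[0], min(cur + shift, rng[1]))
    focus.moveAbsSync({"z": target})
    return focus.position.value["z"]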
Example No. 24
    def _runAcquisition(self, future):
        self._data = []
        self._md = {}

        wls = self.startWavelength.value
        wle = self.endWavelength.value
        res = self.numberOfPixels.value
        dt = self.dwellTime.value
        trig = self._detector.softwareTrigger
        df = self._detector.data

        # Prepare the hardware
        self._emitter.resolution.value = (1, 1)  # Force one pixel only
        self._emitter.translation.value = self.emtTranslation.value
        self._emitter.dwellTime.value = dt

        df.synchronizedOn(trig)
        df.subscribe(self._on_mchr_data)

        wllist = []
        if wle == wls:
            res = 1

        if res <= 1:
            res = 1
            wli = 0
        else:
            wli = (wle - wls) / (res - 1)

        try:
            for i in range(res):
                left = (res - i) * (dt + 0.05)
                future.set_progress(end=time.time() + left)

                cwl = wls + i * wli  # requested value
                self._sgr.moveAbs({"wavelength": cwl}).result()
                if future._acq_state == CANCELLED:
                    raise CancelledError()
                cwl = self._sgr.position.value["wavelength"]  # actual value
                logging.info("Acquiring point %d/%d @ %s", i + 1, res,
                             units.readable_str(cwl, unit="m", sig=3))

                self._pt_acq.clear()
                trig.notify()
                if not self._pt_acq.wait(dt * 5 + 1):
                    raise IOError("Timeout waiting for the data")
                if future._acq_state == CANCELLED:
                    raise CancelledError()
                wllist.append(cwl)

            # Done
            df.unsubscribe(self._on_mchr_data)
            df.synchronizedOn(None)

            # Convert the sequence of data into one spectrum in a DataArray

            if wls > wle:  # went backward? => sort back the spectrum
                logging.debug(
                    "Inverting spectrum as acquisition went from %g to %g m",
                    wls, wle)
                self._data.reverse()
                wllist.reverse()

            na = numpy.array(self._data)  # keeps the dtype
            na.shape += (1, 1, 1, 1)  # make it 5th dim to indicate a channel
            md = self._md
            md[model.MD_WL_LIST] = wllist
            if model.MD_OUT_WL in md:
                # The MD_OUT_WL on the monochromator contains the current cw, which we don't want
                del md[model.MD_OUT_WL]

            # MD_POS should already be at the correct position (from the e-beam metadata)

            # MD_PIXEL_SIZE is not meaningful but handy for the display in Odemis
            # (it's the size of the square on top of the SEM survey => BIG!)
            sempxs = self._emitter.pixelSize.value
            md[model.MD_PIXEL_SIZE] = (sempxs[0] * 50, sempxs[1] * 50)

            spec = model.DataArray(na, md)

            with future._acq_lock:
                if future._acq_state == CANCELLED:
                    raise CancelledError()
                future._acq_state = FINISHED

            return [spec]

        except CancelledError:
            raise  # Just don't log the exception
        except Exception:
            logging.exception("Failure during monochromator scan")
        finally:
            # In case it was stopped before the end
            df.unsubscribe(self._on_mchr_data)
            df.synchronizedOn(None)

            future._acq_done.set()
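For reference, the wavelength step wli computed above is simply the scanned span divided by the number of intervals, so the monochromator visits res evenly spaced points from wls to wle (a single point when they coincide). A minimal sketch of that list, with hypothetical values:

def requested_wavelengths(wls, wle, res):
    # Sketch of the scan points used in _runAcquisition: res points evenly
    # spaced from wls to wle (inclusive), or a single point if they match.
    if res <= 1 or wle == wls:
        return [wls]
    wli = (wle - wls) / (res - 1)
    return [wls + i * wli for i in range(res)]

# e.g. requested_wavelengths(500e-9, 540e-9, 5) -> 500, 510, 520, 530, 540 nm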
Example No. 25
def _DoAutoFocusSpectrometer(future, spectrograph, focuser, detectors,
                             selector):
    """
    cf AutoFocusSpectrometer
    return dict((grating, detector) -> focus pos)
    """
    ret = {}
    # Record the wavelength and grating position
    pos_orig = {
        k: v
        for k, v in spectrograph.position.value.items()
        if k in ("wavelength", "grating")
    }
    gratings = list(spectrograph.axes["grating"].choices.keys())
    if selector:
        sel_orig = selector.position.value

    # For progress update
    cnts = len(gratings) + (len(detectors) - 1)

    # Note: this procedure works well with the SR-193i. In particular, it
    # records the focus position for each grating (in absolute) and each
    # detector (as an offset). It needs to be double checked if used with
    # other detectors.
    if "Shamrock" not in spectrograph.hwVersion:
        logging.warning(
            "Spectrometer autofocusing has not been tested on"
            "this type of spectrograph (%s)", spectrograph.hwVersion)

    try:
        # Autofocus each grating, using the first detector
        detector = detectors[0]
        if selector:
            _moveSelectorToDetector(selector, detector)

        if future._autofocus_state == CANCELLED:
            raise CancelledError()

        # start with the current grating, to save the move time
        gratings.sort(key=lambda g: 0 if g == pos_orig["grating"] else 1)
        for g in gratings:
            logging.debug("Autofocusing on grating %s", g)
            tstart = time.time()
            try:
                # 0th order is not absolutely necessary for focusing, but it
                # typically gives the best results
                spectrograph.moveAbsSync({"wavelength": 0, "grating": g})
            except Exception:
                logging.exception("Failed to move to 0th order for grating %s",
                                  g)

            future._subfuture = AutoFocus(detector, None, focuser)
            fp, flvl = future._subfuture.result()
            ret[(g, detector)] = fp
            cnts -= 1
            _updateAFSProgress(future, time.time() - tstart, cnts)

            if future._autofocus_state == CANCELLED:
                raise CancelledError()

        # Autofocus each additional detector
        grating = pos_orig["grating"]
        for d in detectors[1:]:
            logging.debug("Autofocusing on detector %s", d.name)
            tstart = time.time()
            _moveSelectorToDetector(selector, d)
            try:
                # 0th order + original grating
                # TODO: instead of using original grating, use mirror grating if
                # available
                spectrograph.moveAbsSync({"wavelength": 0, "grating": grating})
            except Exception:
                logging.exception("Failed to move to 0th order and grating %s",
                                  grating)

            future._subfuture = AutoFocus(d, None, focuser)
            fp, flvl = future._subfuture.result()
            ret[(grating, d)] = fp
            cnts -= 1
            _updateAFSProgress(future, time.time() - tstart, cnts)

            if future._autofocus_state == CANCELLED:
                raise CancelledError()

        return ret
    except CancelledError:
        logging.debug("AutofocusSpectrometer cancelled")
    finally:
        spectrograph.moveAbsSync(pos_orig)
        if selector:
            selector.moveAbsSync(sel_orig)
        with future._autofocus_lock:
            if future._autofocus_state == CANCELLED:
                raise CancelledError()
            future._autofocus_state = FINISHED
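The function returns a dict mapping (grating, detector) to a focus position. How a caller consumes it is not shown here; purely as a hedged illustration, it might look up the entry for the current grating and move the focuser accordingly:

def apply_spectrometer_focus(ret, spectrograph, focuser, detector):
    # Hypothetical helper (not part of the code above): pick the recorded
    # focus for the current grating and detector, and move there if known.
    grating = spectrograph.position.value["grating"]
    fp = ret.get((grating, detector))
    if fp is not None:
        focuser.moveAbsSync({"z": fp})
    return fp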
Example No. 26
    def _doSpotAcquisition(self, electron_coordinates, scale):
        """
        Perform acquisition spot per spot.
        Slow, but works even if SEM FoV is small
        """
        escan = self.escan
        ccd = self.ccd
        detector = self.detector
        dwell_time = self.dwell_time
        escan.scale.value = (1, 1)
        escan.resolution.value = (1, 1)

        # Set the SEM dwell time large enough that we unsubscribe before we
        # even get an SEM image (it would just be discarded) and before a
        # second scan, which would cost time, is started.
        sem_dt = 2 * dwell_time
        escan.dwellTime.value = escan.dwellTime.clip(sem_dt)

        # CCD setup
        sem_shape = escan.shape[0:2]
        # sem ROI is ltrb
        sem_roi = (electron_coordinates[0][0] / sem_shape[0] + 0.5,
                   electron_coordinates[0][1] / sem_shape[1] + 0.5,
                   electron_coordinates[-1][0] / sem_shape[0] + 0.5,
                   electron_coordinates[-1][1] / sem_shape[1] + 0.5)
        ccd_roi = self.sem_roi_to_ccd(sem_roi)
        self.configure_ccd(ccd_roi)

        if self.bgsub:
            _set_blanker(self.escan, True)
            self.bg_image = ccd.data.get(asap=False)
            _set_blanker(self.escan, False)

        et = dwell_time
        ccd.exposureTime.value = et  # s
        readout = numpy.prod(ccd.resolution.value) / ccd.readoutRate.value
        tot_time = et + readout + 0.05
        logging.debug("Scanning spot grid with image per spot procedure...")

        self._spot_images = []
        for spot in electron_coordinates:
            self._ccd_done.clear()
            escan.translation.value = spot
            logging.debug("Scanning spot %s", escan.translation.value)
            try:
                if self._acq_state == CANCELLED:
                    raise CancelledError()
                detector.data.subscribe(self._discard_data)
                ccd.data.subscribe(self._onSpotImage)

                # Wait for CCD to capture the image
                if not self._ccd_done.wait(2 * tot_time + 4):
                    raise TimeoutError("Acquisition of CCD timed out")

            finally:
                detector.data.unsubscribe(self._discard_data)
                ccd.data.unsubscribe(self._onSpotImage)

        with self._acq_lock:
            if self._acq_state == CANCELLED:
                raise CancelledError()
            logging.debug("Scan done.")
            self._acq_state = FINISHED

        return self._spot_images, electron_coordinates, scale
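The sem_roi computed above expresses the grid extent as left/top/right/bottom fractions of the scanner field of view, since the spot coordinates are in pixels relative to the image centre. A small worked sketch with made-up numbers:

# Hypothetical values: 1024x1024 e-beam shape, first spot at (-100, -100) px
# and last spot at (+100, +100) px from the centre.
sem_shape = (1024, 1024)
electron_coordinates = [(-100, -100), (100, 100)]
sem_roi = (electron_coordinates[0][0] / sem_shape[0] + 0.5,   # left   ~ 0.402
           electron_coordinates[0][1] / sem_shape[1] + 0.5,   # top    ~ 0.402
           electron_coordinates[-1][0] / sem_shape[0] + 0.5,  # right  ~ 0.598
           electron_coordinates[-1][1] / sem_shape[1] + 0.5)  # bottom ~ 0.598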
Example No. 27
def _DoFindOverlay(future,
                   repetitions,
                   dwell_time,
                   max_allowed_diff,
                   escan,
                   ccd,
                   detector,
                   skew=False):
    """
    Scans a spots grid using the e-beam and captures the CCD image, isolates the
    spots in the CCD image and finds the coordinates of their centers, matches the
    coordinates of the spots in the CCD image to those of SEM image and calculates
    the transformation values from optical to electron image (i.e. ScanGrid->
    DivideInNeighborhoods->FindCenterCoordinates-> ReconstructCoordinates->MatchCoordinates->
    CalculateTransform). In case matching the coordinates is infeasible, it automatically
    repeats grid scan -and thus all steps until matching- with different parameters.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    repetitions (tuple of ints): The number of CL spots are used
    dwell_time (float): Time to scan each spot (in s)
    max_allowed_diff (float): Maximum allowed difference (in m) between the spot
      coordinates and the estimated spot position based on the computed
      transformation (in m). If no transformation can be found to fit this
      limit, the procedure will fail.
    escan (model.Emitter): The e-beam scanner
    ccd (model.DigitalCamera): The CCD
    detector (model.Detector): The electron detector
    skew (boolean): If True, also compute skew
    returns tuple: Transformation parameters
                translation (Tuple of 2 floats)
                scaling (Float)
                rotation (Float)
            dict : Transformation metadata
    raises:
            CancelledError if cancelled
            ValueError if procedure failed
    """
    # TODO: drop the "skew" argument (to always True) once we are convinced it
    # works fine
    # TODO: take the limits of the acceptable values for the metadata, and raise
    # an error when the data is not within range (or retry)
    logging.debug("Starting Overlay...")

    try:
        _set_blanker(escan, False)

        # Repeat until we can find overlay (matching coordinates is feasible)
        for trial in range(MAX_TRIALS_NUMBER):
            logging.debug("Trying with dwell time = %g s...",
                          future._gscanner.dwell_time)
            # For making a report when a failure happens
            report = OrderedDict()  # Description (str) -> value (str()'able)
            optical_image = None
            report["Grid size"] = repetitions
            report["SEM magnification"] = escan.magnification.value
            report["SEM pixel size"] = escan.pixelSize.value
            report["SEM FoV"] = tuple(
                s * p for s, p in zip(escan.shape, escan.pixelSize.value))
            report["Maximum difference allowed"] = max_allowed_diff
            report["Dwell time"] = dwell_time
            subimages = []

            try:
                # Grid scan
                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()

                # Update progress of the future (it may be the second trial)
                future.set_progress(end=time.time() + estimateOverlayTime(
                    future._gscanner.dwell_time, repetitions))

                # Wait for ScanGrid to finish
                optical_image, electron_coordinates, electron_scale = future._gscanner.DoAcquisition(
                )
                report["Spots coordinates in SEM ref"] = electron_coordinates

                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()

                # Update remaining time to 6 s (hardcoded estimate)
                future.set_progress(end=time.time() + 6)

                # Check if ScanGrid gave one image or list of images
                # If it is a list, follow the "one image per spot" procedure
                logging.debug("Isolating spots...")
                if isinstance(optical_image, list):
                    report["Acquisition method"] = "One image per spot"
                    opxs = optical_image[0].metadata[model.MD_PIXEL_SIZE]
                    opt_img_shape = optical_image[0].shape
                    subimage_coordinates = []
                    for oimg in optical_image:
                        subspots, subspot_coordinates = coordinates.DivideInNeighborhoods(
                            oimg, (1, 1), oimg.shape[0] / 2)
                        subimages.append(subspots[0])
                        subimage_coordinates.append(subspot_coordinates[0])
                else:
                    report["Acquisition method"] = "Whole image"
                    # Distance between spots in the optical image (in optical pixels)
                    opxs = optical_image.metadata[model.MD_PIXEL_SIZE]
                    optical_dist = escan.pixelSize.value[0] * electron_scale[
                        0] / opxs[0]
                    opt_img_shape = optical_image.shape

                    # Isolate spots
                    if future._find_overlay_state == CANCELLED:
                        raise CancelledError()

                    subimages, subimage_coordinates = coordinates.DivideInNeighborhoods(
                        optical_image, repetitions, optical_dist)

                if not subimages:
                    raise OverlayError(
                        "Overlay failure: failed to partition image")
                report["Optical pixel size"] = opxs
                report["Optical FoV"] = tuple(
                    s * p for s, p in zip(opt_img_shape[::-1], opxs))
                report[
                    "Coordinates of partitioned optical images"] = subimage_coordinates

                if max_allowed_diff < opxs[0] * 4:
                    logging.warning(
                        "The maximum distance is very small compared to the optical pixel size: "
                        "%g m vs %g m", max_allowed_diff, opxs[0])

                # Find the centers of the spots
                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()
                logging.debug("Finding spot centers with %d subimages...",
                              len(subimages))
                spot_coordinates = [
                    spot.FindCenterCoordinates(i) for i in subimages
                ]

                # Reconstruct the optical coordinates
                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()
                optical_coordinates = coordinates.ReconstructCoordinates(
                    subimage_coordinates, spot_coordinates)

                # Check if SEM calibration is correct. If this is not the case
                # generate a warning message and provide the ratio of X/Y scale.
                ratio = _computeGridRatio(optical_coordinates, repetitions)
                report["SEM X/Y ratio"] = ratio
                if not (0.9 < ratio < 1.1):
                    logging.warning(
                        "SEM may needs calibration. X/Y ratio is %f.", ratio)
                else:
                    logging.info("SEM X/Y ratio is %f.", ratio)

                opt_offset = (opt_img_shape[1] / 2, opt_img_shape[0] / 2)
                optical_coordinates = [(x - opt_offset[0], y - opt_offset[1])
                                       for x, y in optical_coordinates]
                report[
                    "Spots coordinates in Optical ref"] = optical_coordinates

                # Estimate the scale by measuring the distance between the closest
                # two spots in optical and electron coordinates.
                #  * For electrons, it's easy as we've placed them.
                #  * For optical, we pick one spot, and measure the distance to the
                #    closest spot.
                p1 = optical_coordinates[0]

                def dist_to_p1(p):
                    return math.hypot(p1[0] - p[0], p1[1] - p[1])

                optical_dist = min(
                    dist_to_p1(p) for p in optical_coordinates[1:])
                scale = electron_scale[0] / optical_dist
                report["Estimated scale"] = scale

                # max_allowed_diff in pixels
                max_allowed_diff_px = max_allowed_diff / escan.pixelSize.value[
                    0]

                # Match the electron to optical coordinates
                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()

                logging.debug("Matching coordinates...")
                try:
                    known_ec, known_oc, max_diff = coordinates.MatchCoordinates(
                        optical_coordinates, electron_coordinates, scale,
                        max_allowed_diff_px)
                except LookupError as exp:
                    raise OverlayError(
                        "Failed to match SEM and optical coordinates: %s" %
                        (exp, ))

                report["Matched coordinates in SEM ref"] = known_ec
                report["Matched coordinates in Optical ref"] = known_oc
                report["Maximum distance between matches"] = max_diff

                # Calculate transformation parameters
                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()

                # We are almost done... about 1 s left
                future.set_progress(end=time.time() + 1)

                logging.debug("Calculating transformation...")
                try:
                    ret = transform.CalculateTransform(known_ec, known_oc,
                                                       skew)
                except ValueError as exp:
                    raise OverlayError(
                        "Failed to calculate transformation: %s" % (exp, ))

                if future._find_overlay_state == CANCELLED:
                    raise CancelledError()

                logging.debug("Calculating transform metadata...")
                if skew is True:
                    transform_d, skew_d = _transformMetadata(
                        optical_image, ret, escan, ccd, skew)
                    transform_data = (transform_d, skew_d)
                else:
                    transform_d = _transformMetadata(
                        optical_image, ret, escan, ccd, skew
                    )  # Also indicate which dwell time eventually worked
                    transform_data = transform_d
                transform_d[model.MD_DWELL_TIME] = dwell_time

                # Everything went fine
                # _MakeReport("No problem", report, optical_image, subimages)  # DEBUG
                logging.debug("Overlay done.")
                return ret, transform_data
            except OverlayError as exp:
                # Make failure report
                _MakeReport(str(exp), report, optical_image, subimages)
                # Maybe it's just due to a bad SNR => retry with longer dwell time
                future._gscanner.dwell_time = future._gscanner.dwell_time * 1.2 + 0.1
        else:
            raise ValueError("Overlay failure after %d attempts" %
                             (MAX_TRIALS_NUMBER, ))

    except CancelledError:
        pass
    except Exception:
        logging.exception("Finding overlay failed")
        raise
    finally:
        _set_blanker(escan, True)
        with future._overlay_lock:
            future._done.set()
            if future._find_overlay_state == CANCELLED:
                raise CancelledError()
            future._find_overlay_state = FINISHED
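CalculateTransform returns translation, scaling and rotation (plus shear terms when skew is requested). Purely as a hedged illustration of what such parameters mean (the exact conventions of transform.CalculateTransform may differ), a 2D similarity transform maps a coordinate from one frame to the other like this:

import math

def apply_similarity(coord, translation, scaling, rotation):
    # Sketch of a 2D similarity transform: rotate, scale, then translate.
    # Axis and sign conventions are assumptions, not taken from the code above.
    x, y = coord
    c, s = math.cos(rotation), math.sin(rotation)
    return (translation[0] + scaling * (c * x - s * y),
            translation[1] + scaling * (s * x + c * y))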
Example No. 28
def _DoBinaryFocus(future, detector, emt, focus, dfbkg, good_focus, rng_focus):
    """
    Iteratively acquires an optical image, measures its focus level and adjusts
    the optical focus with respect to the focus level.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    detector: model.DigitalCamera or model.Detector
    emt (None or model.Emitter): In case of a SED this is the scanner used
    focus (model.Actuator): The optical focus
    dfbkg (model.DataFlow): dataflow of se- or bs- detector
    good_focus (float): if provided, an already known good focus position to be
      taken into consideration while autofocusing
    rng_focus (tuple): if provided, the search of the best focus position is limited
      within this range
    returns:
        (float): Focus position (m)
        (float): Focus level
    raises:
            CancelledError if cancelled
            IOError if procedure failed
    """
    # TODO: dfbkg is mis-named, as it's the dataflow to use to _activate_ the
    # emitter. To acquire the background, it's specifically not used.

    # It does a dichotomy search on the focus level. In practice, it means it
    # will start going into the direction that increase the focus with big steps
    # until the focus decreases again. Then it'll bounce back and forth with
    # smaller and smaller steps.
    # The tricky parts are:
    # * it's hard to estimate the focus level (on a random image)
    # * two acquisitions at the same focus position can have (slightly) different
    #   focus levels (due to noise and sample degradation)
    # * if the focus actuator is not precise (eg, open loop), it's hard to
    #   even go back to the same focus position when wanted
    logging.debug("Starting binary autofocus on detector %s...", detector.name)

    try:
        # use the .depthOfField on detector or emitter as maximum stepsize
        avail_depths = (detector, emt)
        if model.hasVA(emt, "dwellTime"):
            # Hack in case of using the e-beam with a DigitalCamera detector.
            # All the digital cameras have a depthOfField, which is updated based
            # on the optical lens properties... but the depthOfField in this
            # case depends on the e-beam lens.
            avail_depths = (emt, detector)
        for c in avail_depths:
            if model.hasVA(c, "depthOfField"):
                dof = c.depthOfField.value
                break
        else:
            logging.debug("No depth of field info found")
            dof = 1e-6  # m, not too bad value
        logging.debug("Depth of field is %f", dof)
        min_step = dof / 2

        # adjust to rng_focus if provided
        rng = focus.axes["z"].range
        if rng_focus:
            rng = (max(rng[0], rng_focus[0]), min(rng[1], rng_focus[1]))

        max_step = (rng[1] - rng[0]) / 2
        if max_step <= 0:
            raise ValueError("Unexpected focus range %s" % (rng, ))

        max_reached = False  # True once we've passed the maximum level (ie, start bouncing)
        # It's used to cache the focus level, to avoid reacquiring at the same
        # position. We do it only for the 'rough' max search because for the fine
        # search, the actuator and acquisition delta are likely to play a role
        focus_levels = {}  # focus pos (float) -> focus level (float)

        best_pos = focus.position.value['z']
        best_fm = 0
        last_pos = None

        # Pick measurement method based on the heuristics that SEM detectors
        # are typically just a point (ie, shape == data depth).
        # TODO: is this working as expected? Alternatively, we could check
        # MD_DET_TYPE.
        if len(detector.shape) > 1:
            logging.debug("Using Optical method to estimate focus")
            Measure = MeasureOpticalFocus
        else:
            logging.debug("Using SEM method to estimate focus")
            Measure = MeasureSEMFocus

        step_factor = 2**7
        if good_focus is not None:
            current_pos = focus.position.value['z']
            image = AcquireNoBackground(detector, dfbkg)
            fm_current = Measure(image)
            logging.debug("Focus level at %f is %f", current_pos, fm_current)
            focus_levels[current_pos] = fm_current

            focus.moveAbsSync({"z": good_focus})
            image = AcquireNoBackground(detector, dfbkg)
            fm_good = Measure(image)
            logging.debug("Focus level at %f is %f", good_focus, fm_good)
            focus_levels[good_focus] = fm_good
            last_pos = good_focus

            if fm_good < fm_current:
                # Move back to current position if good_pos is not that good
                # after all
                focus.moveAbsSync({"z": current_pos})
                # it also means we are pretty close
            step_factor = 2**4

        if step_factor * min_step > max_step:
            # Large steps would be too big. We can reduce step_factor and/or
            # min_step. => let's take our time, and maybe find finer focus
            min_step = max_step / step_factor
            logging.debug("Reducing min step to %g", min_step)

        # TODO: to go a bit faster, we could use synchronised acquisition on
        # the detector (if it supports it)
        # TODO: we could estimate the quality of the autofocus by looking at the
        # standard deviation of the focus levels (and the standard deviation
        # of the focus levels measured for the same focus position)
        logging.debug("Step factor used for autofocus: %g", step_factor)
        step_cntr = 1
        while step_factor >= 1 and step_cntr <= MAX_STEPS_NUMBER:
            # TODO: update the estimated time (based on how long it takes to
            # move + acquire, and how many steps are approximately left)

            # Start at the current focus position
            center = focus.position.value['z']
            # Don't redo the acquisition either if we've just done it, or if it
            # was already done and we are still doing a rough search
            if (not max_reached
                    or last_pos == center) and center in focus_levels:
                fm_center = focus_levels[center]
            else:
                image = AcquireNoBackground(detector, dfbkg)
                fm_center = Measure(image)
                logging.debug("Focus level (center) at %f is %f", center,
                              fm_center)
                focus_levels[center] = fm_center

            # Move to right position
            right = center + step_factor * min_step
            right = max(rng[0], min(right, rng[1]))  # clip
            if not max_reached and right in focus_levels:
                fm_right = focus_levels[right]
            else:
                focus.moveAbsSync({"z": right})
                right = focus.position.value["z"]
                image = AcquireNoBackground(detector, dfbkg)
                fm_right = Measure(image)
                logging.debug("Focus level (right) at %f is %f", right,
                              fm_right)
                focus_levels[right] = fm_right

            # Move to left position
            left = center - step_factor * min_step
            left = max(rng[0], min(left, rng[1]))  # clip
            if not max_reached and left in focus_levels:
                fm_left = focus_levels[left]
            else:
                focus.moveAbsSync({"z": left})
                left = focus.position.value["z"]
                image = AcquireNoBackground(detector, dfbkg)
                fm_left = Measure(image)
                logging.debug("Focus level (left) at %f is %f", left, fm_left)
                focus_levels[left] = fm_left
                last_pos = left

            fm_range = (fm_left, fm_center, fm_right)
            pos_range = (left, center, right)
            best_fm = max(fm_range)
            i_max = fm_range.index(best_fm)
            best_pos = pos_range[i_max]

            if future._autofocus_state == CANCELLED:
                raise CancelledError()

            # if best focus was found at the center
            if i_max == 1:
                step_factor /= 2
                if not max_reached:
                    logging.debug("Now zooming in on improved focus")
                max_reached = True
            elif (rng[0] > best_pos - step_factor * min_step
                  or rng[1] < best_pos + step_factor * min_step):
                step_factor /= 1.5
                logging.debug(
                    "Reducing step factor to %g because the focus (%g) is near range limit %s",
                    step_factor, best_pos, rng)
                if step_factor <= 8:
                    max_reached = True  # Force re-checking data

            focus.moveAbsSync({"z": best_pos})
            step_cntr += 1

        if step_cntr > MAX_STEPS_NUMBER:
            logging.info("Auto focus gave up after %d steps @ %g m",
                         step_cntr - 1, best_pos)
        else:
            logging.info("Auto focus found best level %g @ %g m", best_fm,
                         best_pos)

        return best_pos, best_fm

    except CancelledError:
        # Go to the best position known so far
        focus.moveAbsSync({"z": best_pos})
    finally:
        with future._autofocus_lock:
            if future._autofocus_state == CANCELLED:
                raise CancelledError()
            future._autofocus_state = FINISHED
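The dichotomy above boils down to: measure the focus level at the current position and one step to each side, jump to the best of the three, and halve the step once the centre wins. A standalone toy sketch of that loop on a synthetic focus metric (illustrative only, without the production code's caching and cancellation handling):

def toy_binary_focus(measure, start, min_step, rng, step_factor=2 ** 7, max_steps=30):
    # measure(z) -> focus level, higher is better; purely illustrative.
    pos = start
    for _ in range(max_steps):
        if step_factor < 1:
            break
        left = max(rng[0], pos - step_factor * min_step)
        right = min(rng[1], pos + step_factor * min_step)
        best_fm, best_z = max((measure(z), z) for z in (left, pos, right))
        if best_z == pos:  # centre is best -> zoom in with smaller steps
            step_factor /= 2
        pos = best_z
    return pos

# e.g. toy_binary_focus(lambda z: -(z - 1e-3) ** 2, 0.0, 1e-6, (-5e-3, 5e-3))
# converges towards 1e-3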
Example No. 29
    def _DoFit(self, future, spectrum, wavelength, type='gaussian'):
        """
        Smooths the spectrum signal, detects the peaks and applies the type of peak
        fitting required. Finally returns the optimized peak parameters.
        future (model.ProgressiveFuture): Progressive future provided by the wrapper
        spectrum (1d array of floats): The data representing the spectrum.
        wavelength (1d array of floats): The wavelength values corresponding to the
        spectrum given.
        type (str): Type of fitting to be applied (for now only ‘gaussian’ and
        ‘lorentzian’ are available).
        returns:
             params (list of 3-tuple): Each peak parameters as (pos, width, amplitude)
             offset (float): global offset to add
        raises:
                KeyError if given type not available
                ValueError if fitting cannot be applied
        """
        try:
            # values based on experimental datasets
            if len(wavelength) >= 2000:
                divider = 20
            elif len(wavelength) >= 1000:
                divider = 25
            else:
                divider = 30
            init_window_size = max(3, len(wavelength) // divider)
            window_size = init_window_size
            logging.debug(
                "Starting peak detection on data (len = %d) with window = %d",
                len(wavelength), window_size)
            try:
                width = PEAK_WIDTHS[type]
                FitFunction = PEAK_FUNCTIONS[type]
            except KeyError:
                raise KeyError(
                    "Given type %s not in available fitting types: %s" %
                    (type, list(PEAK_FUNCTIONS.keys())))
            for step in range(5):
                if future._fit_state == CANCELLED:
                    raise CancelledError()
                smoothed = Smooth(spectrum, window_len=window_size)
                # Increase window size until peak detection finds enough peaks to fit
                # the spectrum curve
                peaks = Detect(smoothed,
                               wavelength,
                               lookahead=window_size,
                               delta=5)[0]
                if not peaks:
                    window_size = int(round(window_size * 1.2))
                    logging.debug("Retrying to fit peak with window = %d",
                                  window_size)
                    continue

                fit_list = []
                for (pos, amplitude) in peaks:
                    fit_list.append(pos)
                    fit_list.append(width)
                    fit_list.append(amplitude)
                # Initialize offset to 0
                fit_list.append(0)

                if future._fit_state == CANCELLED:
                    raise CancelledError()

                try:
                    with warnings.catch_warnings():
                        # Hide scipy/optimize/minpack.py:690: OptimizeWarning: Covariance of the parameters could not be estimated
                        warnings.filterwarnings("ignore", "", OptimizeWarning)
                        # TODO, from scipy 0.17, curve_fit() supports the 'bounds' parameter.
                        # It could be used to ensure the peaks params are positives.
                        # (Once we don't support Ubuntu 12.04)
                        params, _ = curve_fit(FitFunction,
                                              wavelength,
                                              spectrum,
                                              p0=fit_list)
                    break
                except Exception:
                    window_size = int(round(window_size * 1.2))
                    logging.debug("Retrying to fit peak with window = %d",
                                  window_size)
                    continue
            else:
                raise ValueError("Could not apply peak fitting of type %s." %
                                 type)
            # reformat parameters to (list of 3 tuples, offset)
            peaks_params = []
            for pos, width, amplitude in _Grouped(params[:-1], 3):
                # Note: to avoid negative peaks, the fit functions only take the
                # absolute of the amplitude/width. So now amplitude and width
                # have 50% chances to be negative => Force positive now.
                peaks_params.append((pos, abs(width), abs(amplitude)))
            params = peaks_params, params[-1]
            return params
        except CancelledError:
            logging.debug("Fitting of type %s was cancelled.", type)
        finally:
            with future._fit_lock:
                if future._fit_state == CANCELLED:
                    raise CancelledError()
                future._fit_state = FINISHED
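The flat parameter list passed to curve_fit above is laid out as [pos1, width1, amp1, pos2, width2, amp2, ..., offset]. As a hedged sketch of a model compatible with that packing (the real PEAK_FUNCTIONS entries may be defined differently):

import numpy

def gaussian_sum(x, *params):
    # Sketch: sum of Gaussians described by (pos, width, amplitude) triplets,
    # plus a trailing global offset, matching the fit_list layout above.
    # abs() mirrors the comment above about forcing width/amplitude positive.
    y = numpy.zeros_like(x, dtype=float)
    for pos, width, amp in zip(params[0::3], params[1::3], params[2::3]):
        y += abs(amp) * numpy.exp(-((x - pos) ** 2) / (2 * abs(width) ** 2))
    return y + params[-1]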
Example No. 30
def _DoExhaustiveFocus(future, detector, emt, focus, dfbkg, good_focus,
                       rng_focus):
    """
    Moves the optical focus through the whole given range, measures the focus
    level on each position and ends up where the best focus level was found. In
    case a significant deviation was found while going through the range, it
    stops and limits the search within a smaller range around this position.
    future (model.ProgressiveFuture): Progressive future provided by the wrapper
    detector: model.DigitalCamera or model.Detector
    emt (None or model.Emitter): In case of a SED this is the scanner used
    focus (model.Actuator): The optical focus
    dfbkg (model.DataFlow): dataflow of se- or bs- detector
    good_focus (float): if provided, an already known good focus position to be
      taken into consideration while autofocusing
    rng_focus (tuple): if provided, the search of the best focus position is limited
      within this range
    returns:
        (float): Focus position (m)
        (float): Focus level
    raises:
            CancelledError if cancelled
            IOError if procedure failed
    """
    logging.debug("Starting exhaustive autofocus on detector %s...",
                  detector.name)

    try:
        # use the .depthOfField on detector or emitter as maximum stepsize
        avail_depths = (detector, emt)
        if model.hasVA(emt, "dwellTime"):
            # Hack in case of using the e-beam with a DigitalCamera detector.
            # All the digital cameras have a depthOfField, which is updated based
            # on the optical lens properties... but the depthOfField in this
            # case depends on the e-beam lens.
            avail_depths = (emt, detector)
        for c in avail_depths:
            if model.hasVA(c, "depthOfField"):
                dof = c.depthOfField.value
                break
        else:
            logging.debug("No depth of field info found")
            dof = 1e-6  # m, not too bad value
        logging.debug("Depth of field is %f", dof)

        # Pick measurement method based on the heuristics that SEM detectors
        # are typically just a point (ie, shape == data depth).
        # TODO: is this working as expected? Alternatively, we could check
        # MD_DET_TYPE.
        if len(detector.shape) > 1:
            logging.debug("Using Optical method to estimate focus")
            Measure = MeasureOpticalFocus
        else:
            logging.debug("Using SEM method to estimate focus")
            Measure = MeasureSEMFocus

        # adjust to rng_focus if provided
        rng = focus.axes["z"].range
        if rng_focus:
            rng = (max(rng[0], rng_focus[0]), min(rng[1], rng_focus[1]))

        if good_focus is not None:
            focus.moveAbsSync({"z": good_focus})

        focus_levels = []  # list with focus levels measured so far
        best_pos = orig_pos = focus.position.value['z']
        best_fm = 0

        if future._autofocus_state == CANCELLED:
            raise CancelledError()

        # Based on our measurements on spot detection, a spot is visible within
        # a margin of ~30 microns around its best focus position. Such a step
        # (i.e. ~6 microns) ensures that we will eventually be able to notice a
        # difference compared to the focus levels measured so far.
        step = 8 * dof
        lower_bound, upper_bound = rng
        # start moving upwards until we reach the upper bound or we find some
        # significant deviation in focus level
        # we know that upper_bound is excluded but: 1. realistically the best focus
        # position would not be there 2. the upper_bound - orig_pos range is not
        # expected to be precisely a multiple of the step anyway
        for next_pos in numpy.arange(orig_pos, upper_bound, step):
            focus.moveAbsSync({"z": next_pos})
            image = AcquireNoBackground(detector, dfbkg)
            new_fm = Measure(image)
            focus_levels.append(new_fm)
            logging.debug("Focus level at %f is %f", next_pos, new_fm)
            if new_fm >= best_fm:
                best_fm = new_fm
                best_pos = next_pos
            if len(focus_levels) >= 10 and AssessFocus(focus_levels):
                # trigger binary search if a significant deviation was
                # found at the current position
                return _DoBinaryFocus(
                    future, detector, emt, focus, dfbkg, best_pos,
                    (best_pos - 2 * step, best_pos + 2 * step))

        if future._autofocus_state == CANCELLED:
            raise CancelledError()

        # if nothing was found return to original position and start going
        # downwards
        focus.moveAbsSync({"z": orig_pos})
        for next_pos in numpy.arange(orig_pos - step, lower_bound, -step):
            focus.moveAbsSync({"z": next_pos})
            image = AcquireNoBackground(detector, dfbkg)
            new_fm = Measure(image)
            focus_levels.append(new_fm)
            logging.debug("Focus level at %f is %f", next_pos, new_fm)
            if new_fm >= best_fm:
                best_fm = new_fm
                best_pos = next_pos
            if len(focus_levels) >= 10 and AssessFocus(focus_levels):
                # trigger binary search if a significant deviation was
                # found at the current position
                return _DoBinaryFocus(
                    future, detector, emt, focus, dfbkg, best_pos,
                    (best_pos - 2 * step, best_pos + 2 * step))

        if future._autofocus_state == CANCELLED:
            raise CancelledError()

        logging.debug(
            "No significant focus level was found so far, thus we just move to the best position found %f",
            best_pos)
        focus.moveAbsSync({"z": best_pos})
        return _DoBinaryFocus(future, detector, emt, focus, dfbkg, best_pos,
                              (best_pos - 2 * step, best_pos + 2 * step))

    except CancelledError:
        # Go to the best position known so far
        focus.moveAbsSync({"z": best_pos})
    finally:
        # Only used if for some reason the binary focus is not called (e.g. cancellation)
        with future._autofocus_lock:
            if future._autofocus_state == CANCELLED:
                raise CancelledError()
            future._autofocus_state = FINISHED
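For clarity, the two sweeps above cover the focus range around the starting position in steps of 8 * dof: upwards first (upper bound excluded by numpy.arange, as noted in the comment), then back to the origin and downwards. A small illustration with hypothetical numbers:

import numpy

orig_pos, step = 0.0, 8e-6                                # hypothetical start and step (8 * dof)
lower_bound, upper_bound = -50e-6, 50e-6
up = numpy.arange(orig_pos, upper_bound, step)            # 0, 8, ..., 48 um
down = numpy.arange(orig_pos - step, lower_bound, -step)  # -8, -16, ..., -48 um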