Code example #1
File: acquisition.py Project: thomasaarholt/odemis
    def _set_fan(self, enable):
        """
        Turn on/off the fan of the CCD
        enable (boolean): True to turn on/restore the fan, and False to turn it off
        """
        if not model.hasVA(self._main_data_model.ccd, "fanSpeed"):
            return

        fs = self._main_data_model.ccd.fanSpeed
        if enable:
            if self._orig_fan_speed is not None:
                fs.value = max(fs.value, self._orig_fan_speed)
        else:
            self._orig_fan_speed = fs.value
            fs.value = 0

        # Raise targetTemperature to max/ambient to prevent the fan from
        # automatically starting again. (Some hardware has this behaviour built-in,
        # triggered when the current temperature is too high compared to the target)
        if model.hasVA(self._main_data_model.ccd, "targetTemperature"):
            temp = self._main_data_model.ccd.targetTemperature
            if enable:
                if self._orig_fan_temp is not None:
                    temp.value = min(temp.value, self._orig_fan_temp)
            else:
                self._orig_fan_temp = temp.value
                # TODO: handle choices
                try:
                    temp.value = min(temp.range[1],
                                     25)  # don't set above ambient temperature
                except Exception:
                    logging.warning(
                        "Failed to change targetTemperature when disabling fan",
                        exc_info=True)
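The guard pattern above generalises to any optional VigilantAttribute: check with model.hasVA() before reading or writing it. Below is a minimal, hypothetical sketch assuming a running Odemis back-end that exposes a CCD component (the role name "ccd" is an assumption):

from odemis import model

ccd = model.getComponent(role="ccd")  # assumption: a CCD is registered in the back-end
if model.hasVA(ccd, "fanSpeed"):
    # fanSpeed exposes a .range; its minimum corresponds to the fan being off
    ccd.fanSpeed.value = ccd.fanSpeed.range[0]
else:
    print("This CCD has no controllable fan")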
Code example #2
def AutoFocus(detector, emt, focus, dfbkg=None, good_focus=None, rng_focus=None, method='binary'):
    """
    Wrapper for DoAutoFocus. It provides the ability to check the progress of the
    autofocus procedure or even cancel it.
    detector (model.DigitalCamera or model.Detector): Detector on which to
      improve the focus quality
    emt (None or model.Emitter): In case of a SED this is the scanner used
    focus (model.Actuator): The focus actuator
    dfbkg (model.DataFlow or None): If provided, will be used to start/stop
     the e-beam emission (it must be the dataflow of se- or bs-detector) in
     order to do background subtraction. If None, no background subtraction is
     performed.
    good_focus (float): if provided, an already known good focus position to be
      taken into consideration while autofocusing
    rng_focus (tuple): if provided, the search of the best focus position is limited
      within this range
    method (str): focusing method; if 'binary', a binary search is used, while
      'exhaustive' iterates through the whole provided range
    returns (model.ProgressiveFuture):  Progress of DoAutoFocus, whose result() will return:
            Focus position (m)
            Focus level
    """
    # Create ProgressiveFuture and update its state to RUNNING
    est_start = time.time() + 0.1
    # Check if the emitter is a scanner (focusing = SEM)
    if model.hasVA(emt, "dwellTime"):
        et = emt.dwellTime.value * numpy.prod(emt.resolution.value)
    elif model.hasVA(detector, "exposureTime"):
        et = detector.exposureTime.value
    else:
        # Completely random... but we are in a case where probably that's the last
        # thing the caller will care about.
        et = 1

    f = model.ProgressiveFuture(start=est_start,
                                end=est_start + estimateAutoFocusTime(et))
    f._autofocus_state = RUNNING
    f._autofocus_lock = threading.Lock()
    f.task_canceller = _CancelAutoFocus

    # Run in separate thread
    if method == "exhaustive":
        autofocus_fn = _DoExhaustiveFocus
    elif method == "binary":
        autofocus_fn = _DoBinaryFocus
    else:
        raise ValueError("Unknown autofocus method")

    autofocus_thread = threading.Thread(target=executeTask,
                                        name="Autofocus",
                                        args=(f, autofocus_fn, f, detector, emt,
                                              focus, dfbkg, good_focus, rng_focus))

    autofocus_thread.start()
    return f
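As a usage sketch of the wrapper above: the call returns immediately with a ProgressiveFuture, and result() yields the focus position and focus level described in the docstring. The component roles and the import path (odemis.acq.align.autofocus, as in the upstream project) are assumptions:

from odemis import model
from odemis.acq.align.autofocus import AutoFocus  # assumption: upstream module path

ccd = model.getComponent(role="ccd")       # optical focusing: no scanner needed, so emt=None
focuser = model.getComponent(role="focus")

f = AutoFocus(ccd, None, focuser, method="binary")
foc_pos, foc_level = f.result(timeout=600)  # blocks until the autofocus procedure finishes
print("Best focus at %g m (level %g)" % (foc_pos, foc_level))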
Code example #3
File: actuator.py Project: pieleric/odemis-old
    def __init__(self, name, role, children, backlash, **kwargs):
        """
        children (dict str -> Stage): dict containing one component, the stage
        to wrap
        backlash (dict str -> float): for each axis of the stage, the additional
        distance to move (and the direction). If an axis of the stage is not
        listed, it’s the same as having a backlash of 0 (=> no antibacklash
        motion is performed for this axis)

        """
        if len(children) != 1:
            raise ValueError("AntiBacklashActuator needs 1 child")

        for a, v in backlash.items():
            if not isinstance(a, basestring):
                raise ValueError("Backlash key must be a string but got '%s'" % (a,))
            if not isinstance(v, numbers.Real):
                raise ValueError("Backlash value of %s must be a number but got '%s'" % (a, v))

        self._child = children.values()[0]
        self._backlash = backlash
        axes_def = {}
        for an, ax in self._child.axes.items():
            axes_def[an] = copy.deepcopy(ax)
            axes_def[an].canUpdate = True

        # Whether currently a backlash shift is applied on an axis
        # If True, moving the axis by the backlash value would restore its expected position
        # _shifted_lock must be taken before modifying this attribute
        self._shifted = dict((a, False) for a in axes_def.keys())
        self._shifted_lock = threading.Lock()

        # look for axes in backlash not existing in the child
        missing = set(backlash.keys()) - set(axes_def.keys())
        if missing:
            raise ValueError("Child actuator doesn't have the axes %s" % (missing,))

        model.Actuator.__init__(self, name, role, axes=axes_def,
                                children=children, **kwargs)

        # will take care of executing axis moves asynchronously
        self._executor = CancellableThreadPoolExecutor(max_workers=1)  # one task at a time

        # Duplicate VAs which are just identical
        # TODO: shall we "hide" the antibacklash move by not updating position
        # while doing this move?
        self.position = self._child.position

        if model.hasVA(self._child, "referenced"):
            self.referenced = self._child.referenced
        if model.hasVA(self._child, "speed"):
            self.speed = self._child.speed
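A hypothetical construction sketch for the wrapper above; "stage" is assumed to be an existing Actuator with "x" and "y" axes (in the upstream project the class lives in odemis.driver.actuator):

antibacklash_stage = AntiBacklashActuator("stage-ab", "stage",
                                          children={"orig": stage},
                                          backlash={"x": 10e-6, "y": 10e-6})
# Each x/y move now includes an extra 10 µm anti-backlash motion in the given
# direction, as described in the constructor docstring.
antibacklash_stage.moveAbs({"x": 1e-3, "y": 0}).result()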
Code example #4
File: emitter.py Project: ihebdelmic/odemis
    def __init__(self, name, role, dependencies, **kwargs):
        """
        dependencies (dict str->Component): the two components to wrap together.
            The keys must be "light" for the emitter component, and "clock" for the clock generator.
        """
        # This will create the .powerSupply VA
        model.Emitter.__init__(self,
                               name,
                               role,
                               dependencies=dependencies,
                               **kwargs)
        self._shape = ()

        # Determine child objects. Light
        try:
            self._light = dependencies["light"]
        except KeyError:
            raise ValueError("No 'light' child provided")
        if not isinstance(self._light, model.ComponentBase):
            raise ValueError("Child %s is not an emitter." %
                             (self._light.name, ))
        if not model.hasVA(self._light, 'power'):
            raise ValueError("Child %s has no power VA." %
                             (self._light.name, ))
        if not model.hasVA(self._light, 'emissions'):
            raise ValueError("Child %s has no emissions VA." %
                             (self._light.name, ))
        # Clock generator
        try:
            self._clock = dependencies["clock"]
        except KeyError:
            raise ValueError("No 'clock generator' child provided")
        if not isinstance(self._clock, model.ComponentBase):
            raise ValueError("Child %s is not a Component." %
                             (self._clock.name, ))
        if not model.hasVA(self._clock, "period"):
            raise ValueError("Child %s has no period VA." %
                             (self._clock.name, ))

        # Only one VA from the clock
        self.period = self._clock.period

        # All the other VAs are straight from the light
        self.emissions = self._light.emissions
        self.spectra = self._light.spectra
        self.power = self._light.power

        # Turn off/on the power of the clock based on the light power
        self.emissions.subscribe(self._onEmissions)
        self.power.subscribe(self._onPower)
Code example #5
File: cliccd.py Project: lazem/odemis
    def addst(self):
        main_data = self.main_app.main_data
        stctrl = self._tab.streambar_controller

        axes = stctrl._filter_axes(
            {"filter": ("band", main_data.light_filter)})

        # TODO: special live stream?
        ar_stream = ARSettingsStream(
            "CL intensity on CCD",
            main_data.ccd,
            main_data.ccd.data,
            main_data.ebeam,
            sstage=main_data.scan_stage,
            opm=main_data.opm,
            axis_map=axes,
            detvas=get_local_vas(main_data.ccd, main_data.hw_settings_config),
        )
        # TODO: Allow very large binning on the CCD

        # Make sure the binning is not crazy (especially can happen if CCD is shared for spectrometry)
        if model.hasVA(ar_stream, "detBinning"):
            b = ar_stream.detBinning.value
            if b[0] != b[1] or b[0] > 16:
                ar_stream.detBinning.value = ar_stream.detBinning.clip((1, 1))
                ar_stream.detResolution.value = ar_stream.detResolution.range[
                    1]

        # Create the equivalent MDStream
        sem_stream = self._tab.tab_data_model.semStream
        sem_cl_stream = SEMCLCCDStream("SEM CLi CCD", [sem_stream, ar_stream])

        return stctrl._addRepStream(ar_stream, sem_cl_stream)
Code example #6
    def test_binning(self):
        if (not model.hasVA(self.camera, "binning") or
            self.camera.binning.readonly):
            self.skipTest("Camera doesn't support setting binning")

        self.camera.binning.value = (1, 1)
        max_binning = self.camera.binning.range[1]
        new_binning = (2, 2)
        if new_binning >= max_binning:
            # if there is no binning 2, let's not try
            self.skipTest("Camera doesn't support binning")

        # binning should automatically resize the image
        prev_size = self.camera.resolution.value
        self.camera.binning.value = new_binning
        self.assertNotEqual(self.camera.resolution.value, prev_size)

        # ask for the whole image
        self.size = (self.camera.shape[0] // 2, self.camera.shape[1] // 2)
        self.camera.resolution.value = self.size
        exposure = 0.1
        self.camera.exposureTime.value = exposure

        start = time.time()
        im = self.camera.data.get()
        duration = time.time() - start

        self.assertEqual(im.shape, self.size[::-1]) # TODO a small size diff is fine if bigger than requested
        self.assertGreaterEqual(duration, exposure, "Error execution took %f s, less than exposure time %f." % (duration, exposure))
        self.assertIn(model.MD_EXP_TIME, im.metadata)
        self.assertEqual(im.metadata[model.MD_BINNING], new_binning)
Code example #7
File: cliccd.py Project: delmic/odemis
    def addst(self):
        main_data = self.main_app.main_data

        # TODO: special live stream?
        ar_stream = ARSettingsStream(
            "CL intensity on CCD",
            main_data.ccd,
            main_data.ccd.data,
            main_data.ebeam,
            sstage=main_data.scan_stage,
            opm=main_data.opm,
            detvas=get_local_vas(main_data.ccd, main_data.hw_settings_config),
        )
        # TODO: Allow very large binning on the CCD

        # Make sure the binning is not crazy (especially can happen if CCD is shared for spectrometry)
        if model.hasVA(ar_stream, "detBinning"):
            b = ar_stream.detBinning.value
            if b[0] != b[1] or b[0] > 16:
                ar_stream.detBinning.value = ar_stream.detBinning.clip((1, 1))
                ar_stream.detResolution.value = ar_stream.detResolution.range[1]

        # Create the equivalent MDStream
        sem_stream = self._tab.tab_data_model.semStream
        sem_cl_stream = SEMCLCCDStream("SEM CLi CCD", [sem_stream, ar_stream])

        stctrl = self._tab.streambar_controller
        return stctrl._addRepStream(ar_stream, sem_cl_stream,
                                  axes={"band": main_data.light_filter}
                                  )
Code example #8
File: cam_test_abs.py Project: effting/odemis
    def test_change_settings(self):
        """
        Start an acquisition, and stop just before it should end, then change the
        settings, and acquire again. Check that we don't get the original acquisition
        with the old settings.
        (Such a bug happened on the Andorcam2)
        """
        if not model.hasVA(self.camera,
                           "binning") or self.camera.binning.readonly:
            self.skipTest("Camera doesn't support setting binning")

        self.camera.binning.value = self.camera.binning.clip((2, 2))
        self.size = self.camera.resolution.value
        self.camera.exposureTime.value = 0.09
        self.left = 1

        self.camera.data.subscribe(self.receive_image)
        time.sleep(0.05)
        logging.debug("Stopping acquisition")
        self.camera.data.unsubscribe(self.receive_image)
        # Never received an image...
        self.assertEqual(self.left, 1)

        # Change settings
        logging.debug("Changing binning")
        self.camera.binning.value = (1, 1)
        exp_res = self.camera.resolution.value[::-1]
        da = self.camera.data.get()
        logging.debug("Got res of %s", da.shape)
        self.assertEqual(da.shape, exp_res)
Code example #9
File: _static.py Project: lanery/odemis
 def _updateHistogram(self, data=None):
     if data is None and model.hasVA(self, "zIndex"):
         data = self.raw[0]
         dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
         if dims == "ZYX" and data.ndim == 3:
             data = img.getYXFromZYX(data, self.zIndex.value)  # Remove extra dimensions (of length 1)
     super(Static2DStream, self)._updateHistogram(data)
Code example #10
File: _tiledacq.py Project: lanery/odemis
    def _moveToTile(self, idx, prev_idx, tile_size):
        """
        Move the stage to the tile position
        :param idx: (tuple (float, float)) current index of tile
        :param prev_idx: (tuple (float, float)) previous index of tile
        :param tile_size: (tuple (float, float)) total tile size
        """
        overlap = 1 - self._overlap
        # don't move on the axis that is not supposed to have changed
        m = {}
        idx_change = numpy.subtract(idx, prev_idx)
        if idx_change[0]:
            m["x"] = self._starting_pos["x"] + idx[0] * tile_size[0] * overlap
        if idx_change[1]:
            m["y"] = self._starting_pos["y"] - idx[1] * tile_size[1] * overlap

        logging.debug("Moving to tile %s at %s m", idx, m)
        f = self._stage.moveAbs(m)
        try:
            speed = min(self._stage.speed.value.values()) if model.hasVA(
                self._stage, "speed") else 10e-6
            # add 1 to make sure it doesn't time out in case of a very small move
            t = math.hypot(
                abs(idx_change[0]) * tile_size[0] * overlap,
                abs(idx_change[1]) * tile_size[1] * overlap) / speed + 1
            f.result(t)
        except TimeoutError:
            logging.warning("Failed to move to tile %s", idx)
            self._future.running_subf.cancel()
Code example #11
File: _static.py Project: delmic/odemis
 def _updateHistogram(self, data=None):
     if data is None and model.hasVA(self, "zIndex"):
         data = self.raw[0]
         dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
         if dims == "ZYX" and data.ndim == 3:
             data = img.getYXFromZYX(data, self.zIndex.value)  # Remove extra dimensions (of length 1)
     super(Static2DStream, self)._updateHistogram(data)
Code example #12
File: sracq.py Project: ihebdelmic/odemis
    def start(self):
        """
        Called when the menu entry is selected
        """
        main_data = self.main_app.main_data

        # Stop the streams
        tab_data = main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            s.should_update.value = False

        self.filename.value = self._get_new_filename()
        self._update_exp_dur()

        # Special CCD settings to get values as photon counting
        if model.hasVA(self.ccd, "countConvert"):
            self.ccd.countConvert.value = 2  # photons

        dlg = AcquisitionDialog(
            self, "Super-resolution acquisition",
            "Acquires a series of shortly exposed images, "
            "and store them in sequence.\n"
            "Note, the advanced settings are only applied "
            "after restarting the stream.")
        dlg.addStream(self._stream)
        dlg.addSettings(self, self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')
        dlg.Maximize()
        ans = dlg.ShowModal()

        # Make sure the stream is not playing anymore and CCD is back to normal
        self._stream.should_update.value = False
        if model.hasVA(self.ccd, "countConvert"):
            try:
                self.ccd.countConvert.value = 0  # normal
            except Exception:
                logging.exception("Failed to set back count convert mode")

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        dlg.Destroy()
Code example #13
File: blanker_spectrum.py Project: lanery/odemis
    def addSpectrum(self, name, detector):
        """
        name (str): name of the stream
        detector (DigitalCamera): spectrometer to acquire the spectrum
        """
        logging.debug("Adding spectrum stream for %s", detector.name)

        main_data = self.main_app.main_data
        stctrl = self._tab.streambar_controller

        spg = stctrl._getAffectingSpectrograph(detector)

        axes = {
            "wavelength": ("wavelength", spg),
            "grating": ("grating", spg),
            "slit-in": ("slit-in", spg),
        }

        # Also add light filter for the spectrum stream if it affects the detector
        for fw in (main_data.cl_filter, main_data.light_filter):
            if fw is None:
                continue
            if detector.name in fw.affects.value:
                axes["filter"] = ("band", fw)
                break

        axes = stctrl._filter_axes(axes)

        if model.hasVA(main_data.ebeam, "blanker"):
            blanker = main_data.ebeam.blanker
        else:
            logging.warning(
                "E-beam doesn't support blanker, but trying to use a BlankerSpectrum stream"
            )
            blanker = None

        spec_stream = BlSpectrumSettingsStream(
            name,
            detector,
            detector.data,
            main_data.ebeam,
            sstage=main_data.scan_stage,
            opm=main_data.opm,
            axis_map=axes,
            detvas=get_local_vas(detector, main_data.hw_settings_config),
            blanker=blanker)
        stctrl._set_default_spectrum_axes(spec_stream)

        # Create the equivalent MDStream
        sem_stream = self._tab.tab_data_model.semStream
        sem_spec_stream = BlSEMSpectrumMDStream("SEM " + name,
                                                [sem_stream, spec_stream])

        ret = stctrl._addRepStream(spec_stream, sem_spec_stream)

        # Force the ROI to full FoV, as for the alignment with the SEM image, we need to have always a full image
        spec_stream.roi.value = (0, 0, 1, 1)

        return ret
Code example #14
File: _projection.py Project: pieleric/odemis
    def __init__(self, stream):

        super(SinglePointSpectrumProjection, self).__init__(stream)
        self.stream.selected_pixel.subscribe(self._on_selected_pixel)
        self.stream.selectionWidth.subscribe(self._on_selected_width)
        if model.hasVA(self.stream, "selected_time"):
            self.stream.selected_time.subscribe(self._on_selected_time)
        self.stream.calibrated.subscribe(self._on_new_spec_data, init=True)
Code example #15
File: sracq.py Project: delmic/odemis
    def start(self):
        """
        Called when the menu entry is selected
        """
        main_data = self.main_app.main_data

        # Stop the streams
        tab_data = main_data.tab.value.tab_data_model
        for s in tab_data.streams.value:
            s.should_update.value = False

        self.filename.value = self._get_new_filename()
        self._update_exp_dur()

        # Special CCD settings to get values as photon counting
        if model.hasVA(self.ccd, "countConvert"):
            self.ccd.countConvert.value = 2  # photons

        dlg = AcquisitionDialog(self, "Super-resolution acquisition",
                                "Acquires a series of shortly exposed images, "
                                "and store them in sequence.\n"
                                "Note, the advanced settings are only applied "
                                "after restarting the stream.")
        dlg.addStream(self._stream)
        dlg.addSettings(self, self.vaconf)
        dlg.addButton("Close")
        dlg.addButton("Acquire", self.acquire, face_colour='blue')
        dlg.Maximize()
        ans = dlg.ShowModal()

        # Make sure the stream is not playing anymore and CCD is back to normal
        self._stream.should_update.value = False
        if model.hasVA(self.ccd, "countConvert"):
            try:
                self.ccd.countConvert.value = 0  # normal
            except Exception:
                logging.exception("Failed to set back count convert mode")

        if ans == 0:
            logging.info("Acquisition cancelled")
        elif ans == 1:
            logging.info("Acquisition completed")
        else:
            logging.warning("Got unknown return code %s", ans)

        dlg.Destroy()
Code example #16
    def test_temp(self):
        if not model.hasVA(self.camera, "targetTemperature"):
            self.skipTest("Camera doesn't support setting temperature")

        ttemp = self.camera.targetTemperature.value
        self.assertTrue(-300 < ttemp and ttemp < 100)
        self.camera.targetTemperature.value = self.camera.targetTemperature.range[0]
        self.assertEqual(self.camera.targetTemperature.value, self.camera.targetTemperature.range[0])
Code example #17
File: emitter.py Project: ihebdelmic/odemis
    def __init__(self, name, role, dependencies, **kwargs):
        """
        dependencies (dict str -> Emitter): arbitrary role -> emitter to be used as
          part of this emitter. All the emissions they provide will be made available.
        """
        # TODO: allow to only use a subset of the emissions from each child

        if not dependencies:
            raise ValueError("MultiplexLight needs dependencies")

        model.Emitter.__init__(self,
                               name,
                               role,
                               dependencies=dependencies,
                               **kwargs)
        self._shape = ()

        self._child_idx = {
        }  # Emitter -> index (shift) in the emissions/spectra

        spectra = []
        for n, child in dependencies.items():
            if not (model.hasVA(child, "power") and model.hasVA(
                    child, "emissions") and model.hasVA(child, "spectra")):
                raise ValueError("Child %s is not a light emitter" % (n, ))
            self._child_idx[child] = len(spectra)
            spectra.extend(child.spectra.value)
            # TODO: update emissions whenever the child emissions change

        # Child with the maximum power range
        max_power = max(c.power.range[1] for c in self.dependencies.value)
        self.power = model.FloatContinuous(0, (0., max_power), unit="W")
        self.power.subscribe(self._updatePower)

        # info on which source is which wavelength
        self.spectra = model.ListVA(spectra, unit="m", readonly=True)

        # It needs .spectra and .power
        pwr, em = self._readPwrEmissions()
        self.power._value = pwr

        # ratio of power per source
        # if some sources don't support max power, they are clamped below 1
        self.emissions = model.ListVA(em, unit="", setter=self._setEmissions)
Code example #18
def AutoFocusSpectrometer(spectrograph, focuser, detectors, selector=None):
    """
    Run autofocus for a spectrograph. It will actually run autofocus on each
    grating, and for each detector. The input slit should already be in a
    good position (typically, almost closed), and a light source should be
    active.
    Note: it's currently tailored to the Andor Shamrock SR-193i. It's recommended
    to put the detector on the "direct" output as the first detector.
    spectrograph (Actuator): should have grating and wavelength.
    focuser (Actuator): should have a z axis
    detectors (Detector or list of Detectors): all the detectors available on
      the spectrometer. The first detector will be used to autofocus all the
      gratings, and each other detector will be focused with the original
      grating.
    selector (Actuator or None): must have a rx axis with each position corresponding
     to one of the detectors. If there is only one detector, selector can be None.
    return (ProgressiveFuture -> dict((grating, detector)->focus position)): a progressive future
      which will eventually return a map of grating/detector -> focus position.
    """
    if not isinstance(detectors, collections.Iterable):
        detectors = [detectors]
    if not detectors:
        raise ValueError("At least one detector must be provided")
    if len(detectors) > 1 and selector is None:
        raise ValueError("No selector provided, but multiple detectors")

    # Create ProgressiveFuture and update its state to RUNNING
    est_start = time.time() + 0.1
    detector = detectors[0]
    if model.hasVA(detector, "exposureTime"):
        et = detector.exposureTime.value
    else:
        # Completely random... but we are in a case where probably that's the last
        # thing the caller will care about.
        et = 1

    # 1 time / grating + 1 time / extra detector
    cnts = len(spectrograph.axes["grating"].choices) + (len(detectors) - 1)
    f = model.ProgressiveFuture(start=est_start,
                                end=est_start +
                                cnts * estimateAutoFocusTime(et))
    f.task_canceller = _CancelAutoFocusSpectrometer
    # Extra info for the canceller
    f._autofocus_state = RUNNING
    f._autofocus_lock = threading.Lock()
    f._subfuture = InstantaneousFuture()

    # Run in separate thread
    autofocus_thread = threading.Thread(target=executeTask,
                                        name="Spectrometer Autofocus",
                                        args=(f, _DoAutoFocusSpectrometer, f,
                                              spectrograph, focuser, detectors,
                                              selector))

    autofocus_thread.start()
    return f
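A hedged usage sketch: per the docstring, the returned ProgressiveFuture eventually yields a dict mapping (grating, detector) to the best focus position. The component roles are assumptions, and the function is assumed to be in scope:

from odemis import model

spgr = model.getComponent(role="spectrograph")  # assumption
focuser = model.getComponent(role="focus")      # assumption: has a "z" axis
spec = model.getComponent(role="spectrometer")  # single detector, so selector can be None

f = AutoFocusSpectrometer(spgr, focuser, spec)
res = f.result()  # dict (grating, detector) -> focus position (m)
for (grating, det), pos in res.items():
    print("Grating %s / detector %s: focus at %g m" % (grating, det.name, pos))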
Code example #19
File: _projection.py Project: pieleric/odemis
    def __init__(self, stream):

        super(LineSpectrumProjection, self).__init__(stream)

        if model.hasVA(self.stream, "selected_time"):
            self.stream.selected_time.subscribe(self._on_selected_time)
        self.stream.selectionWidth.subscribe(self._on_selected_width)
        self.stream.selected_line.subscribe(self._on_selected_line)
        self.stream.calibrated.subscribe(self._on_new_data)
        self._shouldUpdateImage()
Code example #20
File: cam_test_abs.py Project: lanery/odemis
    def test_temp(self):
        if not model.hasVA(self.camera, "targetTemperature"):
            self.skipTest("Camera doesn't support setting temperature")

        ttemp = self.camera.targetTemperature.value
        self.assertTrue(-300 < ttemp < 100)
        self.camera.targetTemperature.value = self.camera.targetTemperature.range[
            0]
        self.assertEqual(self.camera.targetTemperature.value,
                         self.camera.targetTemperature.range[0])
Code example #21
File: cam_test_abs.py Project: pieleric/odemis
    def test_basic(self):
        """
        check the synchronization of the SEM with the CCD:
        The SEM scans a region and for each point, the CCD acquires one image.
        """
        start = time.time()
        exp = 50e-3  # s
        # in practice, it takes up to 500ms to take an image of 50 ms exposure
        numbert = numpy.prod(self.sem_size)

        # use large binning, to reduce the resolution
        if model.hasVA(self.ccd, "binning") and not self.ccd.binning.readonly:
            self.ccd.binning.value = self.ccd.binning.clip((4, 4))

        self.ccd_size = self.ccd.resolution.value
        self.ccd.exposureTime.value = exp
        # magical formula to get a long enough dwell time.
        # works with PVCam and Andorcam, but is probably different with other drivers :-(
        readout = numpy.prod(self.ccd_size) / self.ccd.readoutRate.value
        # it seems with the iVac, 20ms is enough to account for the overhead and extra image acquisition
        self.scanner.dwellTime.value = (exp + readout) * 1.1 + 0.2
        self.scanner.resolution.value = self.sem_size
        # pixel write/read setup is pretty expensive ~10ms
        expected_duration = numbert * (self.scanner.dwellTime.value + 0.01)

        self.sem_left = 1 # unsubscribe just after one
        self.ccd_left = numbert # unsubscribe after receiving

        try:
            self.ccd.data.synchronizedOn(self.scanner.newPosition)
        except IOError:
            self.skipTest("Camera doesn't support synchronisation")
        self.ccd.data.subscribe(self.receive_ccd_image)

        self.sed.data.subscribe(self.receive_sem_data)
        for i in range(10):
            # * 2 because it can be quite long to set up each pixel.
            time.sleep(expected_duration * 2 / 10)
            if self.sem_left == 0:
                break # just to make it quicker if it's quicker

        self.ccd.data.unsubscribe(self.receive_ccd_image)
        self.sed.data.unsubscribe(self.receive_sem_data)
        self.ccd.data.synchronizedOn(None)

        logging.info("Took %g s", self.end_time - start)
        time.sleep(exp + readout)
        self.assertEqual(self.sem_left, 0)
        self.assertEqual(self.ccd_left, 0)

        # check we can still get data normally
        d = self.ccd.data.get()

        time.sleep(0.1)
Code example #22
File: autofocus.py Project: pieleric/odemis
def estimateAcquisitionTime(detector, scanner=None):
    """
    Estimate how long one acquisition will take
    detector (model.DigitalCamera or model.Detector): Detector on which to
      improve the focus quality
    scanner (None or model.Emitter): In case of a SED this is the scanner used
    return (0<float): time in s
    """
    # Check if there is a scanner (focusing = SEM)
    if model.hasVA(scanner, "dwellTime"):
        et = scanner.dwellTime.value * numpy.prod(scanner.resolution.value)
    elif model.hasVA(detector, "exposureTime"):
        et = detector.exposureTime.value
        # TODO: also add readoutRate * resolution if present
    else:
        # Completely random... but we are in a case where probably that's the last
        # thing the caller will care about.
        et = 1

    return et
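A small, hypothetical sketch of how this estimate might be used before a synchronous acquisition (the "ccd" role is an assumption):

import logging
from odemis import model

ccd = model.getComponent(role="ccd")  # assumption: camera-style detector
t_est = estimateAcquisitionTime(ccd)  # no scanner, so based on exposureTime if present
logging.debug("One acquisition should take about %g s", t_est)
da = ccd.data.get()  # simple synchronous acquisition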
Code example #23
File: mdupdater.py Project: Mahmood-B/odemis
    def observeQWP(self, qwp, comp_affected):

        if model.hasVA(qwp, "position"):
            def updatePosition(pos, comp_affected=comp_affected):
                md = {model.MD_POL_POS_QWP: pos["rz"]}
                comp_affected.updateMetadata(md)

            qwp.position.subscribe(updatePosition, init=True)
            self._onTerminate.append((qwp.position.unsubscribe, (updatePosition,)))

        return True
Code example #24
File: mdupdater.py Project: Mahmood-B/odemis
    def observePolAnalyzer(self, analyzer, comp_affected):

        if model.hasVA(analyzer, "position"):
            def updatePosition(pos, comp_affected=comp_affected):
                md = {model.MD_POL_MODE: pos["pol"]}
                comp_affected.updateMetadata(md)

            analyzer.position.subscribe(updatePosition, init=True)
            self._onTerminate.append((analyzer.position.unsubscribe, (updatePosition,)))

        return True
Code example #25
File: mdupdater.py Project: Mahmood-B/odemis
    def observeLinPol(self, linpol, comp_affected):

        if model.hasVA(linpol, "position"):
            def updatePosition(pos, comp_affected=comp_affected):
                md = {model.MD_POL_POS_LINPOL: pos["rz"]}
                comp_affected.updateMetadata(md)

            linpol.position.subscribe(updatePosition, init=True)
            self._onTerminate.append((linpol.position.unsubscribe, (updatePosition,)))

        return True
Code example #26
File: cam_test_abs.py Project: lanery/odemis
    def test_basic(self):
        """
        check the synchronization of the SEM with the CCD:
        The SEM scans a region and for each point, the CCD acquires one image.
        """
        start = time.time()
        exp = 50e-3  # s
        # in practice, it takes up to 500ms to take an image of 50 ms exposure
        numbert = numpy.prod(self.sem_size)

        # use large binning, to reduce the resolution
        if model.hasVA(self.ccd, "binning") and not self.ccd.binning.readonly:
            self.ccd.binning.value = self.ccd.binning.clip((4, 4))

        self.ccd_size = self.ccd.resolution.value
        self.ccd.exposureTime.value = exp
        # magical formula to get a long enough dwell time.
        # works with PVCam and Andorcam, but is probably different with other drivers :-(
        readout = numpy.prod(self.ccd_size) / self.ccd.readoutRate.value
        # it seems with the iVac, 20ms is enough to account for the overhead and extra image acquisition
        self.scanner.dwellTime.value = (exp + readout) * 1.1 + 0.2
        self.scanner.resolution.value = self.sem_size
        # pixel write/read setup is pretty expensive ~10ms
        expected_duration = numbert * (self.scanner.dwellTime.value + 0.01)

        self.sem_left = 1  # unsubscribe just after one
        self.ccd_left = numbert  # unsubscribe after receiving

        try:
            self.ccd.data.synchronizedOn(self.scanner.newPosition)
        except IOError:
            self.skipTest("Camera doesn't support synchronisation")
        self.ccd.data.subscribe(self.receive_ccd_image)

        self.sed.data.subscribe(self.receive_sem_data)
        for i in range(10):
            # * 2 because it can be quite long to set up each pixel.
            time.sleep(expected_duration * 2 / 10)
            if self.sem_left == 0:
                break  # just to make it quicker if it's quicker

        self.ccd.data.unsubscribe(self.receive_ccd_image)
        self.sed.data.unsubscribe(self.receive_sem_data)
        self.ccd.data.synchronizedOn(None)

        logging.info("Took %g s", self.end_time - start)
        time.sleep(exp + readout)
        self.assertEqual(self.sem_left, 0)
        self.assertEqual(self.ccd_left, 0)

        # check we can still get data normally
        d = self.ccd.data.get()

        time.sleep(0.1)
Code example #27
    def test_fan(self):
        if not model.hasVA(self.camera, "fanSpeed"):
            self.skipTest("Camera doesn't support setting fan speed")

        orig_fs = self.camera.fanSpeed.value
        self.camera.fanSpeed.value = self.camera.fanSpeed.range[0]
        self.assertEqual(self.camera.fanSpeed.value, self.camera.fanSpeed.range[0])
        self.camera.fanSpeed.value = self.camera.fanSpeed.range[1]
        self.assertEqual(self.camera.fanSpeed.value, self.camera.fanSpeed.range[1])
        self.camera.fanSpeed.value = orig_fs
        self.assertEqual(self.camera.fanSpeed.value, orig_fs)
Code example #28
File: emitter.py Project: pieleric/odemis
    def __init__(self, name, role, children, **kwargs):
        """
        children (dict str->Component): the two components to wrap together.
            The keys must be "light" for the emitter component, and "clock" for the clock generator.
        """
        # This will create the .powerSupply VA
        model.Emitter.__init__(self, name, role, children=children, **kwargs)
        self._shape = ()

        # Determine child objects. Light
        try:
            self._light = children["light"]
        except KeyError:
            raise ValueError("No 'light' child provided")
        if not isinstance(self._light, model.ComponentBase):
            raise ValueError("Child %s is not an emitter." % (self._light.name,))
        if not model.hasVA(self._light, 'power'):
            raise ValueError("Child %s has no power VA." % (self._light.name,))
        if not model.hasVA(self._light, 'emissions'):
            raise ValueError("Child %s has no emissions VA." % (self._light.name,))
        # Clock generator
        try:
            self._clock = children["clock"]
        except KeyError:
            raise ValueError("No 'clock generator' child provided")
        if not isinstance(self._clock,  model.ComponentBase):
            raise ValueError("Child %s is not a Component." % (self._clock.name,))
        if not model.hasVA(self._clock, "period"):
            raise ValueError("Child %s has no period VA." % (self._clock.name,))

        # Only one VA from the clock
        self.period = self._clock.period

        # All the other VAs are straight from the light
        self.emissions = self._light.emissions
        self.spectra = self._light.spectra
        self.power = self._light.power

        # Turn off/on the power of the clock based on the light power
        self.emissions.subscribe(self._onEmissions)
        self.power.subscribe(self._onPower)
Code example #29
File: emitter.py Project: pieleric/odemis
    def __init__(self, name, role, children, **kwargs):
        """
        children (dict str -> Emitter): arbitrary role -> emitter to be used as
          part of this emitter. All the emissions they provide will be made available.
        """
        # TODO: allow to only use a subset of the emissions from each child

        if not children:
            raise ValueError("MultiplexLight needs children")

        model.Emitter.__init__(self, name, role, children=children, **kwargs)
        self._shape = ()

        self._child_idx = {} # Emitter -> index (shift) in the emissions/spectra

        spectra = []
        for n, child in children.items():
            if not (model.hasVA(child, "power") and
                    model.hasVA(child, "emissions") and
                    model.hasVA(child, "spectra")
                   ):
                raise ValueError("Child %s is not a light emitter" % (n,))
            self._child_idx[child] = len(spectra)
            spectra.extend(child.spectra.value)
            # TODO: update emissions whenever the child emissions change

        # Child with the maximum power range
        max_power = max(c.power.range[1] for c in self.children.value)
        self.power = model.FloatContinuous(0, (0., max_power), unit="W")
        self.power.subscribe(self._updatePower)

        # info on which source is which wavelength
        self.spectra = model.ListVA(spectra, unit="m", readonly=True)

        # It needs .spectra and .power
        pwr, em = self._readPwrEmissions()
        self.power._value = pwr

        # ratio of power per source
        # if some sources don't support max power, they are clamped below 1
        self.emissions = model.ListVA(em, unit="", setter=self._setEmissions)
Code example #30
File: zstack.py Project: effting/odemis
    def _estimate_step_duration(self):
        """
        return (float > 0): estimated time (in s) that it takes to move the focus
          by one step.
        """
        speed = None
        if model.hasVA(self.focus, "speed"):
            speed = self.focus.speed.value.get('z', None)
        if speed is None:
            speed = 10e-6  # m/s, pessimistic

        return driver.estimateMoveDuration(abs(self.zstep.value), speed, 0.01)
Code example #31
def AutoFocusSpectrometer(spectrograph, focuser, detectors, selector=None):
    """
    Run autofocus for a spectrograph. It will actually run autofocus on each
    grating, and for each detector. The input slit should already be in a
    good position (typically, almost closed), and a light source should be
    active.
    Note: it's currently tailored to the Andor Shamrock SR-193i. It's recommended
    to put the detector on the "direct" output as the first detector.
    spectrograph (Actuator): should have grating and wavelength.
    focuser (Actuator): should have a z axis
    detectors (Detector or list of Detectors): all the detectors available on
      the spectrometer. The first detector will be used to autofocus all the
      gratings, and each other detector will be focused with the original
      grating.
    selector (Actuator or None): must have a rx axis with each position corresponding
     to one of the detectors. If there is only one detector, selector can be None.
    return (ProgressiveFuture -> dict((grating, detector)->focus position)): a progressive future
      which will eventually return a map of grating/detector -> focus position.
    """
    if not isinstance(detectors, collections.Iterable):
        detectors = [detectors]
    if not detectors:
        raise ValueError("At least one detector must be provided")

    # Create ProgressiveFuture and update its state to RUNNING
    est_start = time.time() + 0.1
    detector = detectors[0]
    if model.hasVA(detector, "exposureTime"):
        et = detector.exposureTime.value
    else:
        # Completely random... but we are in a case where probably that's the last
        # thing the caller will care about.
        et = 1

    # 1 time / grating + 1 time / extra detector
    cnts = len(spectrograph.axes["grating"].choices) + (len(detectors) - 1)
    f = model.ProgressiveFuture(start=est_start,
                                end=est_start + cnts * estimateAutoFocusTime(et))
    f.task_canceller = _CancelAutoFocusSpectrometer
    # Extra info for the canceller
    f._autofocus_state = RUNNING
    f._autofocus_lock = threading.Lock()
    f._subfuture = InstantaneousFuture()

    # Run in separate thread
    autofocus_thread = threading.Thread(target=executeTask,
                                        name="Spectrometer Autofocus",
                                        args=(f, _DoAutoFocusSpectrometer, f,
                                              spectrograph, focuser, detectors, selector))

    autofocus_thread.start()
    return f
Code example #32
File: fastem.py Project: Kleijwegt/odemis
def acquireTiledArea(stream, stage, area, live_stream=None):
    """
    :param stream: (SEMStream) Stream used for the acquisition.
     It must have the detector and emitter connected to the TFS XT client detector and scanner.
     It should be in focus.
     It must NOT have the following local VAs: horizontalFoV, resolution, scale
      (because the VAs of the hardware will be changed directly, and so they shouldn’t be changed by the stream).
    :param stage: (Actuator). It should have axes "x" and "y", which should already be referenced.
    :param area: (float, float, float, float) minx, miny, maxx, maxy:  coordinates of the overview region
    :param live_stream: (StaticStream or None): StaticStream to be updated with
       each tile acquired, to build up live the whole acquisition. NOT SUPPORTED YET.
    : return: (ProgressiveFuture), acquisition future. It returns the complete DataArray.
    """
    # Check the parameters
    if len(area) != 4:
        raise ValueError("area should be 4 float, but got %r" % (area, ))

    for vaname in ("horizontalFoV", "resolution", "scale"):
        if vaname in stream.emt_vas:
            raise ValueError("Stream shouldn't have its own VA %s" %
                             (vaname, ))

    if set(stage.axes) < {"x", "y"}:
        raise ValueError("Stage needs axes x and y, but has %s" %
                         (stage.axes.keys(), ))
    if model.hasVA(stage, "referenced"):
        refd = stage.referenced.value
        for a in ("x", "y"):
            if a in refd:
                if not refd[a]:
                    raise ValueError(
                        "Stage axis '%s' is not referenced. Reference it first"
                        % (a, ))
            else:
                logging.warning(
                    "Going to use the stage in absolute mode, but it doesn't report %s in .referenced VA",
                    a)

    else:
        logging.warning(
            "Going to use the stage in absolute mode, but it doesn't have .referenced VA"
        )

    if live_stream:
        raise NotImplementedError("live_stream not supported")

    est_dur = estimateTiledAcquisitionTime(stream, stage, area)
    f = model.ProgressiveFuture(start=time.time(), end=time.time() + est_dur)
    _executor.submitf(f, _run_overview_acquisition, f, stream, stage, area,
                      live_stream)

    return f
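A minimal call sketch following the docstring: the stream and stage are assumed to come from a configured FastEM system, and the area is expressed in stage coordinates (minx, miny, maxx, maxy) in metres (the values below are illustrative):

area = (-1e-3, -1e-3, 1e-3, 1e-3)  # 2 x 2 mm overview region
f = acquireTiledArea(sem_stream, stage, area)
overview_da = f.result()  # blocks until the whole tiled acquisition is done
print("Overview image shape: %s" % (overview_da.shape,))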
Code example #33
File: cam_test_abs.py Project: lanery/odemis
    def test_fan(self):
        if not model.hasVA(self.camera, "fanSpeed"):
            self.skipTest("Camera doesn't support setting fan speed")

        orig_fs = self.camera.fanSpeed.value
        self.camera.fanSpeed.value = self.camera.fanSpeed.range[0]
        self.assertEqual(self.camera.fanSpeed.value,
                         self.camera.fanSpeed.range[0])
        self.camera.fanSpeed.value = self.camera.fanSpeed.range[1]
        self.assertEqual(self.camera.fanSpeed.value,
                         self.camera.fanSpeed.range[1])
        self.camera.fanSpeed.value = orig_fs
        self.assertEqual(self.camera.fanSpeed.value, orig_fs)
Code example #34
    def __init__(self, microscope, main_app):
        super().__init__(microscope, main_app)

        self.ebeam = main_app.main_data.ebeam

        if not self.ebeam:
            logging.info("No e-beam, plugin disabled ")
            return
        elif not model.hasVA(self.ebeam, "rotation"):
            logging.info("e-beam has no rotation, plugin disabled ")
            return

        self.ebeam.rotation.subscribe(self._on_rotation, init=True)
Code example #35
File: tileacq.py Project: delmic/odemis
    def _estimateStreamPixels(self, s):
        """
        return (int): the number of pixels the stream will generate during an
          acquisition
        """
        px = 0
        if isinstance(s, MultipleDetectorStream):
            for st in s.streams:
                # For the EMStream of a SPARC MDStream, it's just one pixel per
                # repetition (except in case of fuzzing, but let's be optimistic)
                if isinstance(st, (EMStream, CLStream)):
                    px += 1
                else:
                    px += self._estimateStreamPixels(st)

            if hasattr(s, 'repetition'):
                px *= s.repetition.value[0] * s.repetition.value[1]

            return px
        elif isinstance(s, (ARStream, SpectrumStream)):
            # Temporarily reports 0 px, as we don't stitch these streams for now
            return 0

        if hasattr(s, 'emtResolution'):
            px = numpy.prod(s.emtResolution.value)
        elif hasattr(s, 'detResolution'):
            px = numpy.prod(s.detResolution.value)
        elif model.hasVA(s.detector, "resolution"):
            px = numpy.prod(s.detector.resolution.value)
        elif model.hasVA(s.emitter, "resolution"):
            px = numpy.prod(s.emitter.resolution.value)
        else:
            # This shouldn't happen, but let's be optimistic by assuming it'll
            # only acquire one pixel.
            logging.info("Resolution of stream %s cannot be determined.", s)
            px = 1

        return px
Code example #36
File: _tiledacq.py Project: lanery/odemis
    def _estimateStreamPixels(self, s):
        """
        return (int): the number of pixels the stream will generate during an
          acquisition
        """
        px = 0
        if isinstance(s, MultipleDetectorStream):
            for st in s.streams:
                # For the EMStream of a SPARC MDStream, it's just one pixel per
                # repetition (except in case of fuzzing, but let's be optimistic)
                if isinstance(st, (EMStream, CLStream)):
                    px += 1
                else:
                    px += self._estimateStreamPixels(st)

            if hasattr(s, 'repetition'):
                px *= s.repetition.value[0] * s.repetition.value[1]

            return px
        elif isinstance(s, (ARStream, SpectrumStream)):
            # Temporarily reports 0 px, as we don't stitch these streams for now
            return 0

        if hasattr(s, 'emtResolution'):
            px = numpy.prod(s.emtResolution.value)
        elif hasattr(s, 'detResolution'):
            px = numpy.prod(s.detResolution.value)
        elif model.hasVA(s.detector, "resolution"):
            px = numpy.prod(s.detector.resolution.value)
        elif model.hasVA(s.emitter, "resolution"):
            px = numpy.prod(s.emitter.resolution.value)
        else:
            # This shouldn't happen, but let's be optimistic by assuming it'll
            # only acquire one pixel.
            logging.info("Resolution of stream %s cannot be determined.", s)
            px = 1

        return px
Code example #37
File: emitter.py Project: lanery/odemis
    def __init__(self, name, role, dependencies, **kwargs):
        """
        dependencies (dict str -> Emitter): arbitrary role -> emitter to be used as
          part of this emitter. All the emissions they provide will be made available.
        """
        # TODO: allow to only use a subset of the emissions from each child

        if not dependencies:
            raise ValueError("MultiplexLight needs dependencies")

        model.Emitter.__init__(self, name, role, dependencies=dependencies, **kwargs)
        self._shape = ()

        self._child_idx = {} # Emitter -> index (shift) in the power/spectra

        spectra = []
        min_power = []
        max_power = []
        for n, child in dependencies.items():
            if not (model.hasVA(child, "power") and
                    model.hasVA(child, "spectra")
                   ):
                raise ValueError("Child %s is not a light emitter" % (n,))
            self._child_idx[child] = len(spectra)
            spectra.extend(child.spectra.value)
            min_power.extend(child.power.range[0])
            max_power.extend(child.power.range[1])
            # Subscribe to each child power to update self.power
            child.power.subscribe(self._updateMultiplexPower)

        # Child with the maximum power range
        self.power = model.ListContinuous(value=[0] * len(spectra),
                                          range=(tuple(min_power), tuple(max_power)),
                                          unit="W", cls=(int, long, float))
        self.power.subscribe(self._setChildPower)
        self._updateMultiplexPower(None)
        # info on which source is which wavelength
        self.spectra = model.ListVA(spectra, unit="m", readonly=True)
Code example #38
    def _updateReferenced(self):
        """
        update the referenced VA
        """
        ref = {} # str (axes name) -> boolean (is referenced)
        # consider an axis referenced iff it's referenced in every referenceable child
        for c in self.children.value:
            if not model.hasVA(c, "referenced"):
                continue
            cref = c.referenced.value
            for a in (set(self.axes.keys()) & set(cref.keys())):
                ref[a] = ref.get(a, True) and cref[a]

        self.referenced._set_value(ref, force_write=True)
Code example #39
File: actuator.py Project: pieleric/odemis-old
    def _updateReferenced(self):
        """
        update the referenced VA
        """
        ref = {} # str (axes name) -> boolean (is referenced)
        # consider an axis referenced iff it's referenced in every referenceable child
        for c in self.children.value:
            if not model.hasVA(c, "referenced"):
                continue
            cref = c.referenced.value
            for a in (set(self.axes.keys()) & set(cref.keys())):
                ref[a] = ref.get(a, True) and cref[a]

        self.referenced._set_value(ref, force_write=True)
Code example #40
File: settings.py Project: Mahmood-B/odemis
    def __init__(self, tab_panel, tab_data, highlight_change=False):
        super(SecomSettingsController, self).__init__(tab_data)
        main_data = tab_data.main

        self._sem_panel = SemSettingsController(
            tab_panel.fp_settings_secom_sem, "No SEM found", highlight_change,
            tab_data)

        self._optical_panel = OpticalSettingsController(
            tab_panel.fp_settings_secom_optical, "No optical microscope found",
            highlight_change, tab_data)

        # Add the components based on what is available
        # TODO: move it to a separate thread to save time at init?
        if main_data.ccd:
            # Hide exposureTime as it's in local settings of the stream
            self.add_hw_component(main_data.ccd,
                                  self._optical_panel,
                                  hidden={"exposureTime"})

        if hasattr(tab_data, "confocal_set_stream"):
            conf_set_e = StreamController(tab_panel.pnl_opt_streams,
                                          tab_data.confocal_set_stream,
                                          tab_data)
            conf_set_e.stream_panel.flatten()  # removes the expander header
            # StreamController looks pretty much the same as SettingController
            self.setting_controllers.append(conf_set_e)
        else:
            tab_panel.pnl_opt_streams.Hide()  # Not needed

        # For now, we assume that the pinhole (axis) is global: valid for all
        # the confocal streams and FLIM stream. That's partly because most likely
        # the user wouldn't want to have separate values... and also because
        # anyway we don't currently support local stream axes.
        if main_data.pinhole:
            conf = get_hw_config(main_data.pinhole, self._hw_settings_config)
            for a in ("d", ):
                if a not in main_data.pinhole.axes:
                    continue
                self._optical_panel.add_axis(a, main_data.pinhole, conf.get(a))

        if main_data.ebeam:
            self.add_hw_component(main_data.ebeam, self._sem_panel)

            # If can do AutoContrast, display the button
            # TODO: check if detector has a .applyAutoContrast() method, instead
            # of detecting indirectly via the presence of .bpp.
            det = main_data.sed or main_data.bsd
            if det and model.hasVA(det, "bpp"):
                self._sem_panel.add_bc_control(det)
Code example #41
File: _projection.py Project: pieleric/odemis
    def _computeSpec(self):
        """
        Compute the spectrum from the stream with the current parameters.

        Returns: a 1-D DataArray or None if the spectrum could not be computed
        """
        data = self.stream.calibrated.value

        if (self.stream.selected_pixel.value == (None, None) or
            data is None or data.shape[0] == 1):
            return None

        x, y = self.stream.selected_pixel.value
        if model.hasVA(self.stream, "selected_time"):
            t = self.stream._tl_px_values.index(self.stream.selected_time.value)
        else:
            t = 0
        spec2d = self.stream.calibrated.value[:, t, 0, :, :]  # same data but remove useless dims

        md = dict(data.metadata)
        md[model.MD_DIMS] = "C"

        # We treat width as the diameter of the circle which contains the center
        # of the pixels to be taken into account
        width = self.stream.selectionWidth.value
        if width == 1:  # short-cut for simple case
            data = spec2d[:, y, x]
            return model.DataArray(data, md)

        # There are various ways to do it with numpy. As typically the spectrum
        # dimension is big, and the number of pixels to sum is small, it seems
        # the easiest way is to just do some kind of "clever" mean. Using a
        # masked array would also work, but that'd imply having a huge mask.
        radius = width / 2
        n = 0
        # TODO: use same cleverness as mean() for dtype?
        datasum = numpy.zeros(spec2d.shape[0], dtype=numpy.float64)
        # Scan the square around the point, and only pick the points in the circle
        for px in range(max(0, int(x - radius)),
                        min(int(x + radius) + 1, spec2d.shape[-1])):
            for py in range(max(0, int(y - radius)),
                            min(int(y + radius) + 1, spec2d.shape[-2])):
                if math.hypot(x - px, y - py) <= radius:
                    n += 1
                    datasum += spec2d[:, py, px]

        mean = datasum / n

        return model.DataArray(mean.astype(spec2d.dtype), md)
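The loop above averages the spectra of only those pixels whose centre falls inside a circle of diameter selectionWidth around the selected pixel. Below is a minimal, self-contained sketch of that circular averaging on synthetic data (plain NumPy, no Odemis dependencies; the cube shape, selected pixel and width are made-up values for illustration):

import math
import numpy

# Synthetic spectrum cube: C (spectrum channels) x Y x X, made-up values
spec2d = numpy.arange(16 * 8 * 8, dtype=numpy.float64).reshape(16, 8, 8)
x, y = 4, 3    # selected pixel (hypothetical)
width = 3      # selection diameter, in pixels (hypothetical)
radius = width / 2

n = 0
datasum = numpy.zeros(spec2d.shape[0], dtype=numpy.float64)
# Scan the bounding square, keep only the pixels inside the circle
for px in range(max(0, int(x - radius)), min(int(x + radius) + 1, spec2d.shape[-1])):
    for py in range(max(0, int(y - radius)), min(int(y + radius) + 1, spec2d.shape[-2])):
        if math.hypot(x - px, y - py) <= radius:
            n += 1
            datasum += spec2d[:, py, px]

mean_spec = datasum / n
print(n, mean_spec.shape)  # 9 pixels averaged -> a (16,) spectrum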
Code example #42
File: find_overlay.py  Project: lazem/odemis
def _set_blanker(escan, active):
    """
    Set the blanker to the given state, but only if the blanker doesn't support
      "automatic" mode (i.e., None).
    escan (ebeam scanner)
    active (bool): True = blanking = no ebeam
    """
    try:
        if (model.hasVA(escan, "blanker")
                and None not in escan.blanker.choices):
            # Note: we assume that this is blocking, until the e-beam is
            # ready to acquire an image.
            escan.blanker.value = active
    except Exception:
        logging.exception("Failed to set the blanker to %s", active)
Code example #43
File: acquisition.py  Project: ktsitsikas/odemis
    def _set_fan(self, enable):
        """
        Turn on/off the fan of the CCD
        enable (boolean): True to turn on/restore the fan, and False to turn it off
        """
        if not model.hasVA(self._main_data_model.ccd, "fanSpeed"):
            return

        fs = self._main_data_model.ccd.fanSpeed
        if enable:
            if self._orig_fan_speed is not None:
                fs.value = max(fs.value, self._orig_fan_speed)
        else:
            self._orig_fan_speed = fs.value
            fs.value = 0
Code example #44
File: settings.py  Project: delmic/odemis
    def __init__(self, tab_panel, tab_data, highlight_change=False):
        super(SecomSettingsController, self).__init__(tab_data)
        main_data = tab_data.main

        self._sem_panel = SemSettingsController(tab_panel.fp_settings_secom_sem,
                                                "No SEM found",
                                                highlight_change,
                                                tab_data)

        self._optical_panel = OpticalSettingsController(tab_panel.fp_settings_secom_optical,
                                                        "No optical microscope found",
                                                        highlight_change,
                                                        tab_data)

        # Add the components based on what is available
        # TODO: move it to a separate thread to save time at init?
        if main_data.ccd:
            # Hide exposureTime as it's in local settings of the stream
            self.add_hw_component(main_data.ccd, self._optical_panel, hidden={"exposureTime"})

        if hasattr(tab_data, "confocal_set_stream"):
            conf_set_e = StreamController(tab_panel.pnl_opt_streams, tab_data.confocal_set_stream, tab_data)
            conf_set_e.stream_panel.flatten()  # removes the expander header
            # StreamController looks pretty much the same as SettingController
            self.setting_controllers.append(conf_set_e)
        else:
            tab_panel.pnl_opt_streams.Hide()  # Not needed

        # For now, we assume that the pinhole (axis) is global: valid for all
        # the confocal streams and FLIM stream. That's partly because most likely
        # the user wouldn't want to have separate values... and also because
        # anyway we don't currently support local stream axes.
        if main_data.pinhole:
            conf = get_hw_config(main_data.pinhole, self._hw_settings_config)
            for a in ("d",):
                if a not in main_data.pinhole.axes:
                    continue
                self._optical_panel.add_axis(a, main_data.pinhole, conf.get(a))

        if main_data.ebeam:
            self.add_hw_component(main_data.ebeam, self._sem_panel)

            # If it can do AutoContrast, display the button
            # TODO: check if detector has a .applyAutoContrast() method, instead
            # of detecting indirectly via the presence of .bpp.
            det = main_data.sed or main_data.bsd
            if det and model.hasVA(det, "bpp"):
                self._sem_panel.add_bc_control(det)
Code example #45
File: ar_spectral.py  Project: effting/odemis
    def _find_spectrometer(self, detector):
        """
        Find a spectrometer which wraps the given detector
        return (Detector): the spectrometer
        raise LookupError: if nothing found.
        """
        for spec in self.main_app.main_data.spectrometers:
            # Check by name as the components are actually Pyro proxies, which
            # might not be equal even if they point to the same component.
            if (model.hasVA(spec, "dependencies")
                    and detector.name in (d.name
                                          for d in spec.dependencies.value)):
                return spec

        raise LookupError("No spectrometer corresponding to %s found" %
                          (detector.name, ))
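Since the components may be Pyro proxies, comparing them directly is unreliable, which is why the lookup goes through .name. Here is a small duck-typed sketch of the same matching logic, with plain Python stand-ins instead of real components (FakeComp and the names are made up; real spectrometers expose .dependencies as a VA, so .value would be needed):

class FakeComp(object):
    def __init__(self, name, dependencies=()):
        self.name = name
        self.dependencies = dependencies  # simplified: a plain tuple, not a VA

ccd = FakeComp("ccd")
spec = FakeComp("spectrometer", dependencies=(FakeComp("ccd"),))  # a *different* object wrapping the CCD

def find_spectrometer(detector, spectrometers):
    for s in spectrometers:
        if detector.name in (d.name for d in s.dependencies):
            return s
    raise LookupError("No spectrometer corresponding to %s found" % (detector.name,))

print(find_spectrometer(ccd, [spec]).name)  # -> spectrometer, despite distinct objects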
Code example #46
File: cam_test_abs.py  Project: lanery/odemis
    def test_software_trigger(self):
        """
        Check that the synchronisation with softwareTrigger works.
        Make it typical: wait for the data to be received, then notify the
        software trigger again after a little while.
        """
        if not hasattr(self.ccd, "softwareTrigger"):
            self.skipTest("Camera doesn't support software trigger")

        self.ccd.exposureTime.value = self.ccd.exposureTime.clip(50e-3)  # s
        exp = self.ccd.exposureTime.value

        if model.hasVA(self.ccd, "binning") and not self.ccd.binning.readonly:
            self.ccd.binning.value = self.ccd.binning.clip((1, 1))

        self.ccd_size = self.ccd.resolution.value
        readout = numpy.prod(self.ccd_size) / self.ccd.readoutRate.value
        duration = exp + readout  # approximate time for one frame

        numbert = 10
        self.ccd_left = numbert  # unsubscribe after receiving

        try:
            self.ccd.data.synchronizedOn(self.ccd.softwareTrigger)
        except IOError:
            self.skipTest("Camera doesn't support synchronisation")
        self.ccd.data.subscribe(self.receive_ccd_image)

        # Wait for the image
        for i in range(numbert):
            self.got_image.clear()
            self.ccd.softwareTrigger.notify()
            # wait for the image to be received
            gi = self.got_image.wait(duration + 10)
            self.assertTrue(gi,
                            "image not received after %g s" % (duration + 10))
            time.sleep(i * 0.1)  # wait a bit to simulate some processing

        self.assertEqual(self.ccd_left, 0)
        self.ccd.data.synchronizedOn(None)

        # check we can still get data normally
        d = self.ccd.data.get()

        time.sleep(0.1)
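The timeout used above is derived from the per-frame duration, which is simply exposure plus sensor readout. A quick back-of-the-envelope computation with made-up numbers (sensor resolution and readout rate are hypothetical):

import numpy

exp = 50e-3                  # s, exposure time (as set in the test)
ccd_size = (1024, 1024)      # px, hypothetical sensor resolution
readout_rate = 10e6          # px/s, hypothetical readout rate
readout = numpy.prod(ccd_size) / readout_rate
duration = exp + readout
print("%.3f s per frame" % duration)  # ~0.155 s, so the extra 10 s margin is comfortable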
Code example #47
    def estimateAcquisitionTime(self, dt, shape):
        # It's pretty easy to know how many times the leech will run; it's a lot
        # harder to know how long it takes to acquire one probe current reading.

        nacqs = 1 + math.ceil(dt * numpy.prod(shape) / self.period.value)
        if model.hasVA(self._detector, "dwellTime"):
            at = self._detector.dwellTime.value
        else:
            at = 0.1
        if self._selector:
            # The time it takes probably depends a lot on the hardware, and
            # there is not much info (maybe the .speed could be used).
            # For now, we just use the time the only hardware we support takes
            at += 3.0 * 2  # doubled as it has to go back and forth

        at += 0.1  # for overhead

        return nacqs * at
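For intuition, the number of probe-current readings grows as one at the start plus one per elapsed period of the scan. A numbers-only sketch with arbitrary values (detector without a dwellTime VA and no selector, as in the fallback branches above):

import math
import numpy

dt = 10e-6             # s, dwell time per pixel (made up)
shape = (512, 512)     # scan resolution (made up)
period = 60.0          # s, leech period (made up)

scan_time = dt * numpy.prod(shape)         # ~2.62 s for the whole scan
nacqs = 1 + math.ceil(scan_time / period)  # -> 2 readings
at = 0.1 + 0.1                             # fallback reading time + overhead
print(nacqs, nacqs * at)                   # 2 readings, ~0.4 s added to the acquisition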
Code example #48
File: _helper.py  Project: pieleric/odemis-old
    def acquire(self):
        """
        Runs the find overlay procedure.
        returns (Future): will have as a result an empty DataArray with
          the correction metadata
        """
        # Just calls the FindOverlay function and return its future
        ovrl_future = align.FindOverlay(self.repetition.value,
                                        self.dwellTime.value,
                                        OVRL_MAX_DIFF,
                                        self._emitter,
                                        self._ccd,
                                        self._detector,
                                        skew=True,
                                        bgsub=model.hasVA(self._emitter, "blanker"))

        ovrl_future.result = self._result_wrapper(ovrl_future.result)
        return ovrl_future
Code example #49
    def test_software_trigger(self):
        """
        Check that the synchronisation with softwareTrigger works.
        Make it typical: wait for the data to be received, then notify the
        software trigger again after a little while.
        """
        if not hasattr(self.ccd, "softwareTrigger"):
            self.skipTest("Camera doesn't support software trigger")

        self.ccd.exposureTime.value = self.ccd.exposureTime.clip(50e-3)  # s
        exp = self.ccd.exposureTime.value

        if model.hasVA(self.ccd, "binning") and not self.ccd.binning.readonly:
            self.ccd.binning.value = self.ccd.binning.clip((1, 1))

        self.ccd_size = self.ccd.resolution.value
        readout = numpy.prod(self.ccd_size) / self.ccd.readoutRate.value
        duration = exp + readout # approximate time for one frame

        numbert = 10
        self.ccd_left = numbert # unsubscribe after receiving

        try:
            self.ccd.data.synchronizedOn(self.ccd.softwareTrigger)
        except IOError:
            self.skipTest("Camera doesn't support synchronisation")
        self.ccd.data.subscribe(self.receive_ccd_image)

        # Wait for the image
        for i in range(numbert):
            self.got_image.clear()
            self.ccd.softwareTrigger.notify()
            # wait for the image to be received
            gi = self.got_image.wait(duration + 10)
            self.assertTrue(gi, "image not received after %g s" % (duration + 10))
            time.sleep(i * 0.1) # wait a bit to simulate some processing

        self.assertEqual(self.ccd_left, 0)
        self.ccd.data.synchronizedOn(None)

        # check we can still get data normally
        d = self.ccd.data.get()

        time.sleep(0.1)
Code example #50
File: scanner.py  Project: ktsitsikas/odemis
    def __init__(self, name, role, children, **kwargs):
        '''
        children (dict string->model.HwComponent): the children
            There must be exactly two children "external" and "internal".
        Raise:
          ValueError: if the children are not compatible
        '''
        # we will fill the set of children with Components later in ._children
        model.Emitter.__init__(self, name, role, **kwargs)

        # Check the children
        extnl = children["external"]
        if not isinstance(extnl, ComponentBase):
            raise ValueError("Child external is not a component.")
        if not model.hasVA(extnl, "pixelSize"):
            raise ValueError("Child external is not an Emitter component.")
        self._external = extnl
        self.children.value.add(extnl)

        intnl = children["internal"]
        if not isinstance(intnl, ComponentBase):
            raise ValueError("Child internal is not a component.")
        if not model.hasVA(intnl, "pixelSize"):
            raise ValueError("Child internal is not an Emitter component.")
        self._internal = intnl
        self.children.value.add(intnl)

        # Copy VAs directly related to scanning from external
        self._shape = self._external.shape
        for vaname in ("pixelSize", "translation", "resolution", "scale",
                       "rotation", "dwellTime"):
            if model.hasVA(self._external, vaname):
                va = getattr(self._external, vaname)
                setattr(self, vaname, va)

        # Copy VAs for controlling the ebeam from internal
        # horizontalFoV or magnification need a bit more cleverness
        if model.hasVA(self._internal, "horizontalFoV"):
            self.horizontalFoV = self._internal.horizontalFoV
            # Create read-only magnification VA
            mag = self._external.HFWNoMag / self.horizontalFoV.value
            self.magnification = model.VigilantAttribute(mag, unit="", readonly=True)
            self.horizontalFoV.subscribe(self._updateMagnification, init=True)
        elif model.hasVA(self._external, "magnification"):
            self.magnification = self._external.magnification

        # TODO: just pick every VAs which are not yet on self?
        for vaname in ("accelVoltage", "power", "probeCurrent"):
            if model.hasVA(self._internal, vaname):
                va = getattr(self._internal, vaname)
                setattr(self, vaname, va)
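The read-only magnification VA mirrors the usual SEM relation: magnification is the horizontal field width at magnification x1 divided by the current horizontal field of view, and _updateMagnification (not shown in this snippet) presumably just recomputes it whenever horizontalFoV changes. A standalone check of the relation with made-up numbers:

HFW_NO_MAG = 0.25   # m, hypothetical horizontal field width at magnification x1
hfov = 100e-6       # m, hypothetical current horizontal field of view

mag = HFW_NO_MAG / hfov
print(mag)          # 2500.0 -> the e-beam is at x2500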
Code example #51
File: actuator.py  Project: pieleric/odemis-old
    def _doReference(self, axes):
        fs = []
        for c in self.children.value:
            # only do the referencing for the stages that support it
            if not model.hasVA(c, "referenced"):
                continue
            ax = axes & set(c.referenced.value.keys())
            fs.append(c.reference(ax))

        # wait for all referencing to be over
        for f in fs:
            f.result()

        # Re-synchronize the 2 stages by moving the slave where the master is
        mpos = self._master.position.value
        f = self._stage_conv.moveAbs({"x": mpos["x"], "y": mpos["y"]})
        f.result()

        self._updatePosition()
Code example #53
File: cam_test_abs.py  Project: lanery/odemis
    def test_translation(self):
        """
        test the translation VA (if available)
        """
        if (not model.hasVA(self.camera, "translation")
                or self.camera.translation.readonly):
            self.skipTest("Camera doesn't support setting translation")

        # Check the translation can be changed
        self.camera.binning.value = (2, 2)
        self.camera.resolution.value = (16, 16)
        self.camera.translation.value = (
            -10, 3)  # values are small enough they should always be fine
        im = self.camera.data.get()
        self.assertEqual(self.camera.translation.value, (-10, 3))

        # Check the translation automatically fits after putting a large ROI
        self.camera.binning.value = (1, 1)
        self.camera.resolution.value = self.camera.resolution.range[1]
        self.assertEqual(self.camera.translation.value, (0, 0))
        im = self.camera.data.get()

        # Check the MD_POS metadata is correctly updated
        orig_md = {
            model.MD_PIXEL_SIZE: (1e-6, 1e-6),  # m
            model.MD_PIXEL_SIZE_COR: (0.5, 0.5),  # the actual pxs is /2
            model.MD_POS: (-1.1, 0.9),
        }
        self.camera.updateMetadata(orig_md)
        im = self.camera.data.get()
        self.assertEqual(im.metadata[model.MD_POS], orig_md[model.MD_POS])

        self.camera.binning.value = (2, 2)
        self.camera.updateMetadata({model.MD_PIXEL_SIZE: (2e-6, 2e-6)})
        self.camera.resolution.value = (16, 16)
        im = self.camera.data.get()
        self.assertEqual(im.metadata[model.MD_POS], orig_md[model.MD_POS])

        self.camera.translation.value = (-10, 3)
        im = self.camera.data.get()
        exp_pos = (-1.1 + (-10 * 2e-6 * 0.5), 0.9 - (3 * 2e-6 * 0.5))  # phys Y goes opposite direction
        self.assertEqual(im.metadata[model.MD_POS], exp_pos)
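The expected-position arithmetic at the end of this test is worth spelling out: the image centre shifts by translation x pixel size x pixel-size correction, with Y inverted because image Y runs opposite to physical Y. A standalone recomputation of the same numbers:

pos = (-1.1, 0.9)       # m, MD_POS of the untranslated image centre
pxs = (2e-6, 2e-6)      # m, MD_PIXEL_SIZE after 2x2 binning
pxs_cor = (0.5, 0.5)    # MD_PIXEL_SIZE_COR: the real pixel size is half of that
trans = (-10, 3)        # px, ROI translation

exp_pos = (pos[0] + trans[0] * pxs[0] * pxs_cor[0],
           pos[1] - trans[1] * pxs[1] * pxs_cor[1])  # Y is inverted
print(exp_pos)          # approximately (-1.10001, 0.899997)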
Code example #54
    def test_translation(self):
        """
        test the translation VA (if available)
        """
        if (not model.hasVA(self.camera, "translation") or
            self.camera.translation.readonly):
            self.skipTest("Camera doesn't support setting translation")

        # Check the translation can be changed
        self.camera.binning.value = (2, 2)
        self.camera.resolution.value = (16, 16)
        self.camera.translation.value = (-10, 3) # values are small enough they should always be fine
        im = self.camera.data.get()
        self.assertEqual(self.camera.translation.value, (-10, 3))

        # Check the translation automatically fits after putting a large ROI
        self.camera.binning.value = (1, 1)
        self.camera.resolution.value = self.camera.resolution.range[1]
        self.assertEqual(self.camera.translation.value, (0, 0))
        im = self.camera.data.get()

        # Check the MD_POS metadata is correctly updated
        orig_md = {model.MD_PIXEL_SIZE: (1e-6, 1e-6), # m
                   model.MD_PIXEL_SIZE_COR: (0.5, 0.5), # the actual pxs is /2
                   model.MD_POS: (-1.1, 0.9),
                   }
        self.camera.updateMetadata(orig_md)
        im = self.camera.data.get()
        self.assertEqual(im.metadata[model.MD_POS], orig_md[model.MD_POS])

        self.camera.binning.value = (2, 2)
        self.camera.updateMetadata({model.MD_PIXEL_SIZE: (2e-6, 2e-6)})
        self.camera.resolution.value = (16, 16)
        im = self.camera.data.get()
        self.assertEqual(im.metadata[model.MD_POS], orig_md[model.MD_POS])

        self.camera.translation.value = (-10, 3)
        im = self.camera.data.get()
        exp_pos = (-1.1 + (-10 * 2e-6 * 0.5), 0.9 - (3 * 2e-6 * 0.5))  # phys Y goes opposite direction
        self.assertEqual(im.metadata[model.MD_POS], exp_pos)
Code example #55
File: cam_test_abs.py  Project: lanery/odemis
    def test_binning(self):
        if not model.hasVA(self.camera,
                           "binning") or self.camera.binning.readonly:
            self.skipTest("Camera doesn't support setting binning")

        self.camera.binning.value = (1, 1)
        if hasattr(self.camera.binning, "range"):
            max_binning = self.camera.binning.range[1]
        else:  # if binning-VA is VAEnumerated
            max_binning = max(self.camera.binning.choices)
        new_binning = (2, 2)
        if new_binning >= max_binning:
            # if there is no binning 2, let's not try
            self.skipTest("Camera doesn't support binning")

        # binning should automatically resize the image
        prev_size = self.camera.resolution.value
        self.camera.binning.value = new_binning
        self.assertNotEqual(self.camera.resolution.value, prev_size)

        # ask for the whole image
        self.size = (self.camera.shape[0] // 2, self.camera.shape[1] // 2)
        self.camera.resolution.value = self.size
        exposure = 0.1
        self.camera.exposureTime.value = exposure

        start = time.time()
        im = self.camera.data.get()
        duration = time.time() - start

        self.assertEqual(
            im.shape, self.size[::-1]
        )  # TODO a small size diff is fine if bigger than requested
        self.assertGreaterEqual(
            duration, exposure,
            "Error execution took %f s, less than exposure time %f." %
            (duration, exposure))
        self.assertIn(model.MD_EXP_TIME, im.metadata)
        self.assertEqual(im.metadata[model.MD_BINNING], new_binning)
Code example #56
File: _projection.py  Project: pieleric/odemis
    def _projectTile(self, tile):
        """
        Project the tile
        tile (DataArray): Raw tile
        return (DataArray): Projected tile
        """
        dims = tile.metadata.get(model.MD_DIMS, "CTZYX"[-tile.ndim::])
        ci = dims.find("C")  # -1 if not found
        # is RGB
        if dims in ("CYX", "YXC") and tile.shape[ci] in (3, 4):
            # Just pass the RGB data on
            tile = img.ensureYXC(tile)
            tile.flags.writeable = False
            # merge and ensures all the needed metadata is there
            tile.metadata = self.stream._find_metadata(tile.metadata)
            tile.metadata[model.MD_DIMS] = "YXC" # RGB format
            return tile
        elif dims in ("ZYX",) and model.hasVA(self.stream, "zIndex"):
            tile = img.getYXFromZYX(tile, self.stream.zIndex.value)
            tile.metadata[model.MD_DIMS] = "ZYX"
        else:
            tile = img.ensure2DImage(tile)

        return self._projectXY2RGB(tile, self.stream.tint.value)
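The fallback dims string used at the top of _projectTile is just the last ndim characters of "CTZYX", so an array with no MD_DIMS metadata is assumed to end in ...ZYX order. A tiny standalone illustration of that default:

for ndim in (2, 3, 5):
    print(ndim, "CTZYX"[-ndim::])  # 2 -> YX, 3 -> ZYX, 5 -> CTZYX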
Code example #57
File: _projection.py  Project: pieleric/odemis
    def __init__(self, stream):
        '''
        stream (Stream): the Stream to project
        '''
        super(RGBSpatialProjection, self).__init__(stream)

        # handle z stack
        if model.hasVA(stream, "zIndex"):
            self.zIndex = stream.zIndex
            self.zIndex.subscribe(self._onZIndex)

        if stream.raw and isinstance(stream.raw[0], model.DataArrayShadow):
            # The raw tiles corresponding to the .image, updated whenever .image is updated
            self._raw = (())  # 2D tuple of DataArrays
            raw = stream.raw[0]
            md = raw.metadata
            # get the pixel size of the full image
            ps = md[model.MD_PIXEL_SIZE]
            max_mpp = ps[0] * (2 ** raw.maxzoom)
            # sets the mpp as the X axis of the pixel size of the full image
            mpp_rng = (ps[0], max_mpp)
            self.mpp = model.FloatContinuous(max_mpp, mpp_rng, setter=self._set_mpp)
            full_rect = img._getBoundingBox(raw)
            l, t, r, b = full_rect
            rect_range = ((l, b, l, b), (r, t, r, t))
            self.rect = model.TupleContinuous(full_rect, rect_range)
            self.mpp.subscribe(self._onMpp)
            self.rect.subscribe(self._onRect)
            # initialize the projected tiles cache
            self._projectedTilesCache = {}
            # initialize the raw tiles cache
            self._rawTilesCache = {}
            # When True, the projected tiles cache should be invalidated
            self._projectedTilesInvalid = True

        self._shouldUpdateImage()
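The .mpp range set up for pyramidal data runs from the native pixel size of the full image up to the pixel size of the most zoomed-out level, each pyramid level doubling the metres per pixel. A quick numeric illustration with made-up values:

ps = (10e-9, 10e-9)   # m/px, pixel size of the full-resolution image (made up)
maxzoom = 5           # number of zoom levels above full resolution (made up)

max_mpp = ps[0] * (2 ** maxzoom)
mpp_rng = (ps[0], max_mpp)
print(mpp_rng)        # (1e-08, 3.2e-07): from 10 nm/px up to 320 nm/px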
Code example #58
File: scanner.py  Project: pieleric/odemis
    def __init__(self, name, role, children, **kwargs):
        '''
        children (dict string->model.HwComponent): the children
            There must be exactly two children "external" and "internal".
        Raise:
          ValueError: if the children are not compatible
        '''
        # we will fill the set of children with Components later in ._children
        model.Emitter.__init__(self, name, role, **kwargs)

        # Check the children
        extnl = children["external"]
        if not isinstance(extnl, ComponentBase):
            raise ValueError("Child external is not a component.")
        if not model.hasVA(extnl, "pixelSize"):
            raise ValueError("Child external is not an Emitter component.")
        self._external = extnl
        self.children.value.add(extnl)

        intnl = children["internal"]
        if not isinstance(intnl, ComponentBase):
            raise ValueError("Child internal is not a component.")
        if not hasattr(intnl, "shape"):
            # Note: the internal component doesn't need to provide pixelSize
            raise ValueError("Child internal is not an Emitter component.")
        self._internal = intnl
        self.children.value.add(intnl)

        # Copy VAs directly related to scanning from external
        self._shape = self._external.shape
        for vaname in ("pixelSize", "translation", "resolution", "scale",
                       "rotation", "dwellTime"):
            if model.hasVA(self._external, vaname):
                va = getattr(self._external, vaname)
                setattr(self, vaname, va)

        # Copy VAs for controlling the ebeam from internal
        # horizontalFoV or magnification need a bit more cleverness
        if model.hasVA(self._internal, "horizontalFoV"):
            self.horizontalFoV = self._internal.horizontalFoV
            # Create read-only magnification VA
            # TODO: why not just using the magnification VA from the internal?
            self.magnification = model.VigilantAttribute(1, unit="", readonly=True)
            self.horizontalFoV.subscribe(self._updateMagnification, init=True)
        elif model.hasVA(self._external, "magnification"):
            self.magnification = self._external.magnification

        # TODO: just pick every VAs which are not yet on self?
        for vaname in ("accelVoltage", "probeCurrent", "depthOfField", "spotSize"):
            if model.hasVA(self._internal, vaname):
                va = getattr(self._internal, vaname)
                setattr(self, vaname, va)

        # VAs that could be both on internal or external. If on both, pick internal
        # TODO: add a better way to select if both provide: either via arg, or
        # select the one which provides a None (=auto)?
        for vaname in ("power", "blanker", "external"):
            if model.hasVA(self._internal, vaname):
                va = getattr(self._internal, vaname)
                setattr(self, vaname, va)
            elif model.hasVA(self._external, vaname):
                va = getattr(self._external, vaname)
                setattr(self, vaname, va)
Code example #59
File: _projection.py  Project: pieleric/odemis
    def _computeSpec(self):
        """
        Compute the 1D spectrum from the stream.calibrated VA using the
        selected_time, selected_line, and width.

        return: spec1d, md
            where spec1d is the line spectrum (1D array)
            and md is the metadata structure
        """

        if ((None, None) in self.stream.selected_line.value or
            self.stream.calibrated.value.shape[0] == 1):
            return None, None

        if model.hasVA(self.stream, "selected_time"):
            t = self.stream._tl_px_values.index(self.stream.selected_time.value)
        else:
            t = 0

        spec2d = self.stream.calibrated.value[:, t, 0, :, :]  # same data but remove useless dims
        width = self.stream.selectionWidth.value

        # Number of points to return: the length of the line
        start, end = self.stream.selected_line.value
        v = (end[0] - start[0], end[1] - start[1])
        l = math.hypot(*v)
        n = 1 + int(l)
        if l < 1:  # a line of just one pixel is considered not valid
            return None, None

        # FIXME: if the data has a width of 1 (i.e., just a line), and the
        # requested width is an even number, the output is empty (because all
        # the interpolated points are outside of the data).

        # Coordinates of each point: ndim of data (5-2), pos on line (Y), spectrum (X)
        # The line is scanned from the end till the start so that the spectra
        # closest to the origin of the line are at the bottom.
        coord = numpy.empty((3, width, n, spec2d.shape[0]))
        coord[0] = numpy.arange(spec2d.shape[0])  # spectra = all
        coord_spc = coord.swapaxes(2, 3)  # just a view to have (line) space as last dim
        coord_spc[-1] = numpy.linspace(end[0], start[0], n)  # X axis
        coord_spc[-2] = numpy.linspace(end[1], start[1], n)  # Y axis

        # Spread over the width
        # perpendicular unit vector
        pv = (-v[1] / l, v[0] / l)
        width_coord = numpy.empty((2, width))
        spread = (width - 1) / 2
        width_coord[-1] = numpy.linspace(pv[0] * -spread, pv[0] * spread, width)  # X axis
        width_coord[-2] = numpy.linspace(pv[1] * -spread, pv[1] * spread, width)  # Y axis

        coord_cw = coord[1:].swapaxes(0, 2).swapaxes(1, 3)  # view with coordinates and width as last dims
        coord_cw += width_coord

        # Interpolate the values based on the data
        if width == 1:
            # simple version for the most usual case
            spec1d = ndimage.map_coordinates(spec2d, coord[:, 0, :, :], order=1)
        else:
            # FIXME: the mean should be dependent on how many pixels inside the
            # original data were pick on each line. Currently if some pixels fall
            # out of the original data, the outside pixels count as 0.
            # force the intermediate values to float, as mean() still needs to run
            spec1d_w = ndimage.map_coordinates(spec2d, coord, output=numpy.float64, order=1)
            spec1d = spec1d_w.mean(axis=0).astype(spec2d.dtype)
        assert spec1d.shape == (n, spec2d.shape[0])

        # Use metadata to indicate spatial distance between pixel
        pxs_data = self.stream.calibrated.value.metadata[MD_PIXEL_SIZE]

        if pxs_data[0] is not None:
            pxs = math.hypot(v[0] * pxs_data[0], v[1] * pxs_data[1]) / (n - 1)
        else:
            logging.warning("Pixel size should have two dimensions")
            return None, None

        raw_md = self.stream.calibrated.value.metadata
        md = raw_md.copy()
        md[model.MD_DIMS] = "XC"  # X = distance along the line, C = spectrum
        md[MD_PIXEL_SIZE] = (None, pxs)  # for the spectrum, use get_spectrum_range()
        return spec1d, md
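The coordinate array built above is what drives the interpolation: for every output point it stores the (channel, y, x) position at which the data cube is sampled. A minimal standalone sketch of the width == 1 case on synthetic data (the cube shape and line endpoints are made up; the original code builds the same coordinates through swapped-axis views):

import math
import numpy
from scipy import ndimage

# Synthetic cube: C (spectrum channels) x Y x X
spec2d = numpy.arange(4 * 10 * 10, dtype=numpy.float64).reshape(4, 10, 10)

start, end = (1, 1), (7, 5)     # selected line, in (x, y) pixels (hypothetical)
v = (end[0] - start[0], end[1] - start[1])
l = math.hypot(*v)
n = 1 + int(l)                  # number of points along the line

# Coordinates: one (channel, y, x) triple per output sample
coord = numpy.empty((3, n, spec2d.shape[0]))
coord[0] = numpy.arange(spec2d.shape[0])                          # every channel
coord[1] = numpy.linspace(end[1], start[1], n)[:, numpy.newaxis]  # Y, scanned end -> start
coord[2] = numpy.linspace(end[0], start[0], n)[:, numpy.newaxis]  # X, scanned end -> start

spec1d = ndimage.map_coordinates(spec2d, coord, order=1)  # linear interpolation
print(spec1d.shape)             # (8, 4): 8 points along the line, 4 channels each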
Code example #60
File: settings.py  Project: delmic/odemis
    def add_bc_control(self, detector):
        """ Add Hw brightness/contrast control """

        self.panel.add_divider()

        # Create extra grid bag sizer
        gb_sizer = wx.GridBagSizer()
        gb_sizer.SetEmptyCellSize((0, 0))

        # Create the widgets

        btn_autoadjust = ImageTextToggleButton(self.panel, height=24, label="Auto adjust",
                                               icon=img.getBitmap("icon/ico_contrast.png"))
        btn_autoadjust.SetToolTip("Adjust detector brightness/contrast")

        gb_sizer.Add(btn_autoadjust, (0, 0), (2, 1), border=10,
                     flag=wx.ALIGN_CENTRE_VERTICAL | wx.RIGHT)

        sld_conf = {
            "accuracy": 2,
            "event": wx.EVT_SCROLL_CHANGED,
            "control_type": odemis.gui.CONTROL_SLIDER,
            "type": "slider",
        }

        num_rows = 0

        if model.hasVA(detector, "brightness"):
            brightness_entry = self.add_setting_entry("brightness", detector.brightness, detector,
                                                      sld_conf)
            # TODO: 'Ugly' detaching somewhat nullifies the cleanliness created by using
            # 'add_setting_entry'. 'add_setting_entry' needs some more refactoring anyway.
            self.panel.gb_sizer.Detach(brightness_entry.value_ctrl)
            self.panel.gb_sizer.Detach(brightness_entry.lbl_ctrl)

            gb_sizer.Add(brightness_entry.lbl_ctrl, (num_rows, 1))
            gb_sizer.Add(brightness_entry.value_ctrl, (num_rows, 2), flag=wx.EXPAND)
            num_rows += 1

        if model.hasVA(detector, "contrast"):
            contrast_entry = self.add_setting_entry("contrast", detector.contrast, detector,
                                                    sld_conf)

            self.panel.gb_sizer.Detach(contrast_entry.value_ctrl)
            self.panel.gb_sizer.Detach(contrast_entry.lbl_ctrl)

            gb_sizer.Add(contrast_entry.lbl_ctrl, (num_rows, 1))
            gb_sizer.Add(contrast_entry.value_ctrl, (num_rows, 2), flag=wx.EXPAND)
            num_rows += 1

        if num_rows:
            gb_sizer.AddGrowableCol(2)

        # Add the extra sizer to the main sizer
        self.panel.gb_sizer.Add(gb_sizer, (self.panel.num_rows, 0), span=(1, 2),
                                border=5, flag=wx.ALL | wx.EXPAND)
        self.panel.num_rows += 1

        # Connect various events to the auto adjust button

        def on_chamber_state(state, btn=btn_autoadjust):
            wx.CallAfter(btn.Enable, state in (CHAMBER_UNKNOWN, CHAMBER_VACUUM))

        # We keep a reference to keep the subscription active.
        self._subscriptions.append(on_chamber_state)
        self.tab_data.main.chamberState.subscribe(on_chamber_state, init=True)

        def adjust_done(_):
            """ Callback that enables and untoggles the 'auto adjust' contrast button """
            btn_autoadjust.SetToggle(False)
            btn_autoadjust.SetLabel("Auto adjust")
            btn_autoadjust.Enable()
            brightness_entry.value_ctrl.Enable()
            contrast_entry.value_ctrl.Enable()

        def auto_adjust(_):
            """ Call the auto contrast method on the detector if it's not already running """
            if not btn_autoadjust.up:
                f = detector.applyAutoContrast()
                btn_autoadjust.SetLabel("Adjusting...")
                btn_autoadjust.Disable()
                brightness_entry.value_ctrl.Disable()
                contrast_entry.value_ctrl.Disable()
                f.add_done_callback(adjust_done)

        btn_autoadjust.Bind(wx.EVT_BUTTON, auto_adjust)