Code example #1
File: _base.py Project: amuskens/odemis
    def _updateImage(self):
        """ Recomputes the image with all the raw data available
        """
        # logging.debug("Updating image")
        if not self.raw:
            return

        try:
            if not isinstance(self.raw, list):
                raise AttributeError(".raw must be a list of DA/DAS")

            data = self.raw[0]
            bkg = self.background.value
            if bkg is not None:
                try:
                    data = img.Subtract(data, bkg)
                except Exception as ex:
                    logging.info("Failed to subtract background data: %s", ex)

            dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
            ci = dims.find("C")  # -1 if not found
            # is RGB
            if dims in ("CYX", "YXC") and data.shape[ci] in (3, 4):
                rgbim = img.ensureYXC(data)
                rgbim.flags.writeable = False
                # merge and ensure all the needed metadata is there
                rgbim.metadata = self._find_metadata(rgbim.metadata)
                rgbim.metadata[model.MD_DIMS] = "YXC"  # RGB format
                self.image.value = rgbim
            else:  # is grayscale
                if data.ndim != 2:
                    data = img.ensure2DImage(data)  # Remove extra dimensions (of length 1)
                self.image.value = self._projectXY2RGB(data, self.tint.value)
        except Exception:
            logging.exception("Updating %s %s image", self.__class__.__name__, self.name.value)
Code example #2
File: calibration.py Project: thomasaarholt/odemis
def compensate_spectrum_efficiency(data, bckg=None, coef=None):
    """
    Apply the efficiency compensation factors to the given data.
    If the wavelength range of the calibration doesn't cover the whole data wavelength range,
    the missing wavelengths are filled with the value at the border. Wavelengths
    in between calibration points are linearly interpolated.
    data (DataArray of at least 5 dims): the original data. Need MD_WL_* metadata
    bckg (None or DataArray of at least 5 dims): the background data, with TZYX = 1111
      Need MD_WL_* metadata.
    coef (None or DataArray of at least 5 dims): the coefficient data, with TZYX = 1111
      Need MD_WL_* metadata.
    returns (DataArray): same shape as original data. Can have dtype=float
    """
    # Need to get the calibration data for each wavelength of the data
    wl_data = spectrum.get_wavelength_per_pixel(data)

    # TODO: use MD_BASELINE as a fallback?
    if bckg is not None:
        if bckg.shape[1:] != (1, 1, 1, 1):
            raise ValueError("bckg should have shape C1111")
        # It must match the data
        # TODO: support if the data is binned?
        if data.shape[0] != bckg.shape[0]:
            raise ValueError(
                "Background should have same length as the data, but got %d != %d"
                % (bckg.shape[0], data.shape[0]))

        wl_bckg = spectrum.get_wavelength_per_pixel(bckg)
        # Warn if not the same wavelength
        if not numpy.allclose(wl_bckg, wl_data):
            logging.warning(
                "Spectrum background is between %g->%g nm, "
                "while the spectrum is between %g->%g nm.", wl_bckg[0] * 1e9,
                wl_bckg[-1] * 1e9, wl_data[0] * 1e9, wl_data[-1] * 1e9)

        data = img.Subtract(data, bckg)

    # We could be more clever if calib has a MD_WL_POLYNOMIAL, but it's very
    # unlikely the calibration is in this form anyway.
    if coef is not None:
        if coef.shape[1:] != (1, 1, 1, 1):
            raise ValueError("coef should have shape C1111")
        wl_coef = spectrum.get_wavelength_per_pixel(coef)

        # Warn if the calibration is not enough for the data
        if wl_coef[0] > wl_data[0] or wl_coef[-1] < wl_data[-1]:
            logging.warning(
                "Spectrum efficiency compensation is only between "
                "%g->%g nm, while the spectrum is between %g->%g nm.",
                wl_coef[0] * 1e9, wl_coef[-1] * 1e9, wl_data[0] * 1e9,
                wl_data[-1] * 1e9)

        # Interpolate the calibration data for each wl_data
        calib_fitted = numpy.interp(wl_data, wl_coef, coef[:, 0, 0, 0, 0])
        calib_fitted.shape += (1, 1, 1, 1)  # put TZYX dims

        # Compensate the data
        data = data * calib_fitted  # will keep metadata from data

    return data
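
A small, self-contained sketch of the interpolation and broadcasting step above, using plain numpy and made-up wavelengths/coefficients: numpy.interp clamps to the border values outside the calibrated range (matching the "filled with the value at the border" behaviour), and the trailing singleton dims make the per-wavelength factors broadcast over the whole CTZYX cube.

import numpy

wl_data = numpy.linspace(400e-9, 700e-9, 5)          # wavelengths of the data (C axis)
wl_coef = numpy.array([350e-9, 550e-9, 750e-9])      # wavelengths of the calibration
coef = numpy.array([2.0, 1.0, 2.0])                  # efficiency coefficients

calib_fitted = numpy.interp(wl_data, wl_coef, coef)  # clamped outside the calibrated range
calib_fitted.shape += (1, 1, 1, 1)                   # C -> C,1,1,1,1 so it broadcasts
data = numpy.ones((5, 1, 1, 4, 4))                   # fake CTZYX data cube
compensated = data * calib_fitted                    # broadcasts along C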
Code example #3
    def _getPolarProjection(self, pos):
        """
        Return the polar projection of the image at the given position.
        pos (tuple of 2 floats): position (must be part of the ._sempos)
        returns DataArray: the polar projection
        """
        if pos in self._polar:
            polard = self._polar[pos]
        else:
            # Compute the polar representation
            data = self._sempos[pos]
            try:
                if numpy.prod(data.shape) > (1280 * 1080):
                    # AR conversion fails on very large images due to too much
                    # memory consumed (> 2Gb). So, rescale + use a "degraded" type that
                    # uses less memory. As the display size is small (compared
                    # to the size of the input image), it shouldn't actually
                    # affect the output much.
                    logging.info(
                        "AR image is very large %s, will convert to "
                        "azymuthal projection in reduced precision.",
                        data.shape)
                    y, x = data.shape
                    if y > x:
                        small_shape = 1024, int(round(1024 * x / y))
                    else:
                        small_shape = int(round(1024 * y / x)), 1024
                    # resize
                    data = img.rescale_hq(data, small_shape)
                    dtype = numpy.float16
                else:
                    dtype = None  # just let the function use the best one

                size = min(min(data.shape) * 2, 1134)

                # TODO: First compute quickly a low resolution and then
                # compute a high resolution version.
                # TODO: could use the size of the canvas that will display
                # the image to save some computation time.

                bg_data = self.background.value
                if bg_data is None:
                    # Simple version: remove the background value
                    data0 = polar.ARBackgroundSubtract(data)
                else:
                    data0 = img.Subtract(data, bg_data)  # metadata from data

                # 2 x size of original image (on smallest axis) and at most
                # the size of a full-screen canvas
                polard = polar.AngleResolved2Polar(data0,
                                                   size,
                                                   hole=False,
                                                   dtype=dtype)
                self._polar[pos] = polard
            except Exception:
                logging.exception("Failed to convert to azymuthal projection")
                return data  # display it raw as fallback

        return polard
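
The rescaling for very large AR images boils down to capping the longest axis at 1024 px while preserving the aspect ratio. A minimal sketch in plain Python, with a hypothetical shape:

def reduced_shape(y, x, cap=1024):
    # Cap the longest axis at `cap` pixels, keeping the aspect ratio.
    if y > x:
        return cap, int(round(cap * x / y))
    else:
        return int(round(cap * y / x)), cap

print(reduced_shape(2160, 2560))  # -> (864, 1024)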
Code example #4
File: light.py Project: ihebdelmic/odemis
def _doTurnOnLight(f, bl, ccd):
    try:
        # We need the light to be off, so that we can notice a difference when
        # it turns on.
        # In case it's already turned on, just assume everything is fine.
        if bl.emissions.value[0] * bl.power.value != 0:
            logging.debug("The light is already on")
            return
        if f._task_state == CANCELLED:
            raise CancelledError()

        # The light is turned off; if the image is indeed all "black", the avg
        # intensity should roughly correspond to the maximum noise level.
        img_light_off = ccd.data.get(asap=False)
        avg_intensity_off = numpy.average(img_light_off)
        # An intensity that is surely not caused by noise: 150% of the noise level, plus a small offset
        intensity_min_on = avg_intensity_off * 1.5 + 0.1
        logging.debug("Looking for intensity of %s in an %s image",
                      intensity_min_on, img_light_off.shape)
        # Turn the light on, full power!
        bl.power.value = bl.power.range[1]
        bl.emissions.value = [1] * len(bl.emissions.value)
        while True:
            img2 = ccd.data.get()
            try:
                new_img = img.Subtract(img2, img_light_off)
            except ValueError:  # could happen if CCD changed resolution
                new_img = img2 - avg_intensity_off
            if f._task_state == CANCELLED:
                raise CancelledError()
            # number of pixels with intensity higher than the detection threshold
            pixels_high_intensity = numpy.sum(new_img > intensity_min_on)
            # the fraction of pixels with intensity higher than the detection threshold
            a = pixels_high_intensity / new_img.size
            # if more than 0.5% of the pixels are above the threshold, the light is on
            if a > 0.005:
                logging.debug("Detected light on (%f %% pixels above %f)",
                              a * 100, intensity_min_on)
                break
            logging.debug("No light detected (%f %% pixels above %f)", a * 100,
                          intensity_min_on)

    except CancelledError:
        raise  # Just don't log the exception
    except Exception:
        logging.exception("Failure while turning on light %s", bl.name)
        raise
    finally:
        with f._task_lock:
            if f._task_state == CANCELLED:
                raise CancelledError()
            f._task_state = FINISHED
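
A self-contained numpy sketch of the detection criterion used above, with synthetic images instead of real CCD data: the light is considered on once more than 0.5% of the background-subtracted pixels exceed 1.5x the dark-image average (plus a small offset).

import numpy

img_light_off = numpy.random.poisson(100, (512, 512)).astype(numpy.float64)
img_light_on = img_light_off + numpy.random.poisson(300, (512, 512))

intensity_min_on = numpy.average(img_light_off) * 1.5 + 0.1
new_img = img_light_on - img_light_off                   # stands in for img.Subtract
fraction = numpy.sum(new_img > intensity_min_on) / new_img.size
print(fraction > 0.005)                                  # True -> light detected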
Code example #5
    def _updateHistogram(self, data=None):
        """
        data (DataArray): the raw data to use, defaults to .raw[0] - background
          (if present).
        It will also update the intensityRange if auto_bc is enabled.
        """
        # Compute histogram and compact version
        if data is None:
            if not self.raw:
                logging.debug("Not computing histogram as .raw is empty")
                return

            data = self.raw[0]
            if isinstance(data, model.DataArrayShadow):
                # Pyramidal => use the smallest version
                data = self._getMergedRawImage(data, data.maxzoom)

            # We only do background subtraction when automatically selecting raw
            bkg = self.background.value
            if bkg is not None:
                try:
                    data = img.Subtract(data, bkg)
                except Exception as ex:
                    logging.info(
                        "Failed to subtract background when computing histogram: %s",
                        ex)

        # Depth can change at each image (depends on hardware settings)
        self._updateDRange(data)

        # Initially, _drange might be None, in which case it will be guessed
        hist, edges = img.histogram(data, irange=self._drange)
        if hist.size > 256:
            chist = img.compactHistogram(hist, 256)
        else:
            chist = hist
        self.histogram._full_hist = hist
        self.histogram._edges = edges
        # First update the value, before the intensityRange subscribers are called...
        self.histogram._value = chist

        if self.auto_bc.value:
            self._recomputeIntensityRange()

        # Notify last, so intensityRange is correct when subscribers get the new histogram
        self.histogram.notify(chist)
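
A minimal numpy sketch of the histogram compaction idea; the exact behaviour of img.histogram/img.compactHistogram is an assumption here, the sketch only shows regrouping a large histogram into 256 bins by summing neighbouring bins:

import numpy

data = numpy.random.randint(0, 4096, (256, 256))
hist, edges = numpy.histogram(data, bins=1024, range=(0, 4096))
chist = hist.reshape(256, -1).sum(axis=1)   # 1024 bins -> 256 bins, total count preserved
print(hist.sum() == chist.sum())            # True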
Code example #6
File: spot.py Project: lanery/odemis
def _SubtractBackground(data, background=None):
    # We actually want to make really sure that only real signal is > 0.
    if background is not None:
        # So we subtract the "almost max" of the background signal
        hist, edges = img.histogram(background)
        noise_max = img.findOptimalRange(hist, edges, outliers=1e-6)[1]
    else:
        try:
            noise_max = 1.3 * data.metadata[model.MD_BASELINE]
        except (AttributeError, KeyError):
            # Fallback: take average of the four corner pixels
            noise_max = 1.3 * numpy.mean(
                (data[0, 0], data[0, -1], data[-1, 0], data[-1, -1]))

    noise_max = data.dtype.type(noise_max)  # ensure we don't change the dtype
    data0 = img.Subtract(data, noise_max)
    # Alternative way (might work better if the background is really not uniform):
    # 1.3 corresponds to 3 times the noise
    # data0 = img.Subtract(data, 1.3 * background)

    return data0
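
For context, a hedged sketch of the kind of clipped subtraction these examples rely on: with unsigned integer data a plain "-" would wrap around, so the subtraction needs to be clamped at 0. This is an assumption about what img.Subtract provides, illustrated with plain numpy:

import numpy

def subtract_clipped(a, b):
    # Promote to float, subtract, clamp at 0, and cast back to the original dtype.
    res = a.astype(numpy.float64) - b
    return numpy.clip(res, 0, None).astype(a.dtype)

a = numpy.array([10, 5, 0], dtype=numpy.uint16)
print(subtract_clipped(a, 7))  # -> [3 0 0]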
Code example #7
File: _static.py Project: ihebdelmic/odemis
    def _project2Polar(self, pos):
        """
        Return the polar projection of the image at the given position.
        pos (float, float, string or None): position (must be part of the ._pos)
        returns DataArray: the polar projection
        """
        # Note: keep a local reference to the dict. If self._polar is reset while
        # this method is still running, the old dict might get new entries again, though it should stay empty.
        polar = self._polar
        if pos in polar:
            polard = polar[pos]
        else:
            # Compute the polar representation
            data = self._pos[pos]
            try:
                # Get bg image, if existing. It must match the polarization (defaulting to MD_POL_NONE).
                bg_image = self._getBackground(
                    data.metadata.get(MD_POL_MODE, MD_POL_NONE))

                if bg_image is None:
                    # Simple version: remove the background value
                    data_bg_corr = angleres.ARBackgroundSubtract(data)
                else:
                    data_bg_corr = img.Subtract(data,
                                                bg_image)  # metadata from data

                if numpy.prod(data_bg_corr.shape) > (1280 * 1080):
                    # AR conversion fails with very large images due to too much
                    # memory consumed (> 2Gb). So, rescale + use a "degraded" type that
                    # uses less memory. As the display size is small (compared
                    # to the size of the input image), it shouldn't actually
                    # affect the output much.
                    logging.info(
                        "AR image is very large %s, will convert to "
                        "azimuthal projection in reduced precision.",
                        data_bg_corr.shape)
                    y, x = data_bg_corr.shape
                    if y > x:
                        small_shape = 1024, int(round(1024 * x / y))
                    else:
                        small_shape = int(round(1024 * y / x)), 1024
                    # resize
                    data_bg_corr = img.rescale_hq(data_bg_corr, small_shape)

                # 2 x size of original image (on smallest axis) and at most
                # the size of a full-screen canvas
                size = min(min(data_bg_corr.shape) * 2, 1134)

                # TODO: First compute quickly a low resolution and then
                # compute a high resolution version.
                # TODO: could use the size of the canvas that will display
                # the image to save some computation time.

                # Warning: allocates a lot of memory, which will not be freed until
                # the current thread is terminated.

                polard = angleres.AngleResolved2Polar(data_bg_corr,
                                                      size,
                                                      hole=False)

                # TODO: don't hold too many of them in cache (eg, max 3 * 1134**2)
                polar[pos] = polard
            except Exception:
                logging.exception("Failed to convert to azimuthal projection")
                return data  # display it raw as fallback

        return polard
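
A tiny sketch of the output-size rule shared by the two polar-projection examples: twice the smallest input axis, capped at 1134 px (roughly a full-screen canvas).

def polar_output_size(shape, cap=1134):
    return min(min(shape) * 2, cap)

print(polar_output_size((864, 1024)))  # -> 1134 (capped)
print(polar_output_size((400, 600)))   # -> 800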
Code example #8
File: calibration.py Project: effting/odemis
def apply_spectrum_corrections(data, bckg=None, coef=None):
    """
    Apply the background correction and the spectrum efficiency compensation
    factors to the given data if applicable.
    If the wavelength range of the calibration doesn't cover the whole data wavelength range,
    the missing wavelengths are filled with the value at the border. Wavelengths
    in between calibration points are linearly interpolated.
    :param data: (DataArray of at least 5 dims) The original data.
            Spectrum data can be of two types:
            - mirror (no wl info)
            - grating (wl info)
        Temporal spectrum data can be of four types:
            - mirror + focus mode (no wl and time info)
            - mirror + operate mode (no wl but time info)
            - grating + focus mode (wl but no time info)
            - grating + operate mode (wl and time info)
        Chronograph data can be only of one type, with no wl but time info. So far no bg correction is
            supported for chronograph data. Spectrum efficiency correction does not apply for this type of data.
    :param bckg: (None or DataArray of at least 5 dims) The background data, with
        CTZYX = C1111 (spectrum), CTZYX = CT111 (temporal spectrum) or CTZYX = 1T111 (time correlator).
    :param coef: (None or DataArray of at least 5 dims) The coefficient data, with CTZYX = C1111.
    :returns: (DataArray) Same shape as original data. Can have dtype=float.
    """

    # handle time correlator data (chronograph) data
    # -> no spectrum efficiency compensation and bg correction supported
    if data.shape[-5] <= 1 and data.shape[-4] > 1:
        raise ValueError(
            "Do not support any background correction or spectrum efficiency "
            "compensation for time correlator (chronograph) data")

    # TODO: use MD_BASELINE as a fallback?
    if bckg is not None:

        # Check that the bg matches the data.
        # TODO: support if the data is binned?
        if data.shape[0:2] != bckg.shape[0:2]:
            raise ValueError(
                "Background should have the same shape as the data, but got %s != %s"
                % (bckg.shape[0:2], data.shape[0:2]))

        # If temporal spectrum data, check for time range and streak mode.
        if model.MD_STREAK_MODE in data.metadata.keys():
            # Check that the data and the bg image were acquired with the same streak mode.
            if data.metadata[model.MD_STREAK_MODE] != bckg.metadata[
                    model.MD_STREAK_MODE]:
                raise ValueError(
                    "Background should have the same streak mode as the data, but got %d != %d"
                    % (bckg.metadata[model.MD_STREAK_MODE],
                       data.metadata[model.MD_STREAK_MODE]))
            # Check that the time range of the data matches with the bg image.
            if data.metadata[model.MD_STREAK_TIMERANGE] != bckg.metadata[
                    model.MD_STREAK_TIMERANGE]:
                raise ValueError(
                    "Background should have the same time range as the data, but got %s != %s"
                    % (bckg.metadata[model.MD_STREAK_TIMERANGE],
                       data.metadata[model.MD_STREAK_TIMERANGE]))

        # Check if we have any wavelength information.
        if model.MD_WL_LIST not in data.metadata:
            # temporal spectrum data, but acquired in mirror mode (with/without time info)
            # spectrum data, but acquired in mirror mode

            # check that bg data also doesn't contain wl info
            if model.MD_WL_LIST in bckg.metadata:
                raise ValueError(
                    "Found MD_WL_LIST metadata in background image, but "
                    "data does not provide any wavelength information")
            data = img.Subtract(data, bckg)

        else:
            # temporal spectrum with wl info (with/without time info)
            # spectrum data with wl info

            # Need to get the calibration data for each wavelength of the data
            wl_data = spectrum.get_wavelength_per_pixel(data)

            # Check that bg data also contains wl info.
            try:
                wl_bckg = spectrum.get_wavelength_per_pixel(bckg)
            except KeyError:
                raise ValueError(
                    "Found no spectrum metadata (MD_WL_LIST) in the background image."
                )

            # Warn if not the same wavelength
            if not numpy.allclose(wl_bckg, wl_data):
                logging.warning(
                    "Spectrum background is between %g->%g nm, "
                    "while the spectrum is between %g->%g nm.",
                    wl_bckg[0] * 1e9, wl_bckg[-1] * 1e9, wl_data[0] * 1e9,
                    wl_data[-1] * 1e9)

            data = img.Subtract(data, bckg)

    if coef is not None:
        # Check if we have any wavelength information in data.
        if model.MD_WL_LIST not in data.metadata:
            raise ValueError(
                "Cannot apply spectrum correction as "
                "data does not provide any wavelength information.")
        if coef.shape[1:] != (1, 1, 1, 1):
            raise ValueError(
                "Spectrum efficiency compensation should have shape C1111.")

        # Need to get the calibration data for each wavelength of the data
        wl_data = spectrum.get_wavelength_per_pixel(data)
        wl_coef = spectrum.get_wavelength_per_pixel(coef)

        # Warn if the calibration is not enough for the data
        if wl_coef[0] > wl_data[0] or wl_coef[-1] < wl_data[-1]:
            logging.warning(
                "Spectrum efficiency compensation is only between "
                "%g->%g nm, while the spectrum is between %g->%g nm.",
                wl_coef[0] * 1e9, wl_coef[-1] * 1e9, wl_data[0] * 1e9,
                wl_data[-1] * 1e9)

        # Interpolate the calibration data for each wl_data
        calib_fitted = numpy.interp(wl_data, wl_coef, coef[:, 0, 0, 0, 0])
        calib_fitted.shape += (1, 1, 1, 1)  # put TZYX dims
        # Compensate the data
        data = data * calib_fitted  # will keep metadata from data

    return data
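
Finally, a minimal sketch of the shape test that rejects time-correlator (chronograph) data at the top of this function, using the CTZYX ordering and hypothetical shapes:

import numpy

chrono = numpy.zeros((1, 1024, 1, 1, 1))    # C=1, T=1024 -> chronograph
spec = numpy.zeros((340, 1, 1, 256, 256))   # C=340, T=1  -> spectrum cube

for d in (chrono, spec):
    is_chrono = d.shape[-5] <= 1 and d.shape[-4] > 1
    print(is_chrono)                        # True for chrono, False for spec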