Esempio n. 1
0
def compensate_spectrum_efficiency(data, bckg=None, coef=None):
    """
    Apply the efficiency compensation factors to the given data.
    If the wavelength of the calibration doesn't cover the whole data wavelength,
    the missing wavelength is filled by the same value as the border. Wavelength
    in-between points is linearly interpolated.
    data (DataArray of at least 5 dims): the original data. Need MD_WL_* metadata
    bckg (None or DataArray of at least 5 dims): the background data, with TZXY = 1111
      Need MD_WL_* metadata.
    coef (None or DataArray of at least 5 dims): the coeficient data, with TZXY = 1111
      Need MD_WL_* metadata.
    returns (DataArray): same shape as original data. Can have dtype=float
    """
    # Wavelength of every C pixel of the data, used to match the calibration
    # information against the data.
    wl_data = spectrum.get_wavelength_per_pixel(data)

    # TODO: use MD_BASELINE as a fallback?
    if bckg is not None:
        if bckg.shape[1:] != (1, 1, 1, 1):
            raise ValueError("bckg should have shape C1111")
        # The background must match the data along the C dimension
        # TODO: support if the data is binned?
        if data.shape[0] != bckg.shape[0]:
            raise ValueError(
                "Background should have same length as the data, but got %d != %d"
                % (bckg.shape[0], data.shape[0]))

        wl_bckg = spectrum.get_wavelength_per_pixel(bckg)
        # Not fatal if the wavelengths differ, but worth warning about
        if not numpy.allclose(wl_bckg, wl_data):
            logging.warning(
                "Spectrum background is between %g->%g nm, "
                "while the spectrum is between %g->%g nm.",
                wl_bckg[0] * 1e9, wl_bckg[-1] * 1e9,
                wl_data[0] * 1e9, wl_data[-1] * 1e9)

        data = img.Subtract(data, bckg)

    # We could be more clever if calib has a MD_WL_POLYNOMIAL, but it's very
    # unlikely the calibration is in this form anyway.
    if coef is not None:
        if coef.shape[1:] != (1, 1, 1, 1):
            raise ValueError("coef should have shape C1111")
        wl_coef = spectrum.get_wavelength_per_pixel(coef)

        # Warn when the calibration range doesn't fully cover the data range
        if not (wl_coef[0] <= wl_data[0] and wl_coef[-1] >= wl_data[-1]):
            logging.warning(
                "Spectrum efficiency compensation is only between "
                "%g->%g nm, while the spectrum is between %g->%g nm.",
                wl_coef[0] * 1e9, wl_coef[-1] * 1e9, wl_data[0] * 1e9,
                wl_data[-1] * 1e9)

        # Resample the calibration onto the data wavelengths (numpy.interp
        # extends the border values outside the calibrated range)
        fitted = numpy.interp(wl_data, wl_coef, coef[:, 0, 0, 0, 0])
        fitted.shape += (1, 1, 1, 1)  # put TZYX dims

        # Compensate the data
        data = data * fitted  # will keep metadata from data

    return data
Esempio n. 2
0
def compensate_spectrum_efficiency(data, calib):
    """
    Apply the efficiency compensation factors to the given data.
    If the wavelength of the calibration doesn't cover the whole data wavelength,
    the missing wavelength is filled by the same value as the border. Wavelength
    in-between points is linearly interpolated.
    data (DataArray of at least 5 dims): the original data. Need MD_WL_* metadata
    calib (DataArray of at least 5 dims): the calibration data, with TZXY = 1111
      Need MD_WL_* metadata.
    returns (DataArray): same shape as original data. Can have dtype=float
    raises ValueError: if calib doesn't have shape C1111
    """
    # Validate the calibration shape up-front: calib[:, 0, 0, 0, 0] below would
    # silently pick the wrong values for any other shape.
    if calib.shape[1:] != (1, 1, 1, 1):
        raise ValueError("calib should have shape C1111")

    # Need to get the calibration data for each wavelength of the data
    wl_data = spectrum.get_wavelength_per_pixel(data)
    # We could be more clever if calib has a MD_WL_POLYNOMIAL, but it's very
    # unlikely the calibration is in this form anyway.
    wl_calib = spectrum.get_wavelength_per_pixel(calib)

    # Warn if the calibration is not enough for the data
    if wl_calib[0] > wl_data[0] or wl_calib[-1] < wl_data[-1]:
        logging.warning("Spectrum efficiency compensation is only between "
                        "%g->%g nm, while the spectrum is between %g->%g nm.",
                        wl_calib[0] * 1e9, wl_calib[-1] * 1e9,
                        wl_data[0] * 1e9, wl_data[-1] * 1e9)

    # Interpolate the calibration data for each wl_data (numpy.interp clamps
    # to the border values outside the calibrated range)
    calib_fitted = numpy.interp(wl_data, wl_calib, calib[:, 0, 0, 0, 0])
    calib_fitted.shape += (1, 1, 1, 1)  # put TZYX dims

    # Compensate the data
    return data * calib_fitted  # will keep metadata from data
Esempio n. 3
0
def compensate_spectrum_efficiency(data, calib):
    """
    Apply the efficiency compensation factors to the given data.
    If the wavelength of the calibration doesn't cover the whole data wavelength,
    the missing wavelength is filled by the same value as the border. Wavelength
    in-between points is linearly interpolated.
    data (DataArray of at least 5 dims): the original data. Need MD_WL_* metadata
    calib (DataArray of at least 5 dims): the calibration data, with TZXY = 1111
      Need MD_WL_* metadata.
    returns (DataArray): same shape as original data. Can have dtype=float
    raises ValueError: if calib doesn't have shape C1111
    """
    # Validate the calibration shape up-front: calib[:, 0, 0, 0, 0] below would
    # silently pick the wrong values for any other shape.
    if calib.shape[1:] != (1, 1, 1, 1):
        raise ValueError("calib should have shape C1111")

    # Need to get the calibration data for each wavelength of the data
    wl_data = spectrum.get_wavelength_per_pixel(data)
    # We could be more clever if calib has a MD_WL_POLYNOMIAL, but it's very
    # unlikely the calibration is in this form anyway.
    wl_calib = spectrum.get_wavelength_per_pixel(calib)

    # Warn if the calibration is not enough for the data
    if wl_calib[0] > wl_data[0] or wl_calib[-1] < wl_data[-1]:
        logging.warning(
            "Spectrum efficiency compensation is only between "
            "%g->%g nm, while the spectrum is between %g->%g nm.",
            wl_calib[0] * 1e9, wl_calib[-1] * 1e9, wl_data[0] * 1e9,
            wl_data[-1] * 1e9)

    # Interpolate the calibration data for each wl_data (numpy.interp clamps
    # to the border values outside the calibrated range)
    calib_fitted = numpy.interp(wl_data, wl_calib, calib[:, 0, 0, 0, 0])
    calib_fitted.shape += (1, 1, 1, 1)  # put TZYX dims

    # Compensate the data
    return data * calib_fitted  # will keep metadata from data
Esempio n. 4
0
def compensate_spectrum_efficiency(data, bckg=None, coef=None):
    """
    Apply the efficiency compensation factors to the given data.
    If the wavelength of the calibration doesn't cover the whole data wavelength,
    the missing wavelength is filled by the same value as the border. Wavelength
    in-between points is linearly interpolated.
    data (DataArray of at least 5 dims): the original data. Need MD_WL_* metadata
    bckg (None or DataArray of at least 5 dims): the background data, with TZXY = 1111
      Need MD_WL_* metadata.
    coef (None or DataArray of at least 5 dims): the coeficient data, with TZXY = 1111
      Need MD_WL_* metadata.
    returns (DataArray): same shape as original data. Can have dtype=float
    """
    # Wavelength of every C pixel of the data
    wl_data = spectrum.get_wavelength_per_pixel(data)

    # TODO: use MD_BASELINE as a fallback?
    if bckg is not None:
        if bckg.shape[1:] != (1, 1, 1, 1):
            raise ValueError("bckg should have shape C1111")
        # Background must have the same C length as the data
        # TODO: support if the data is binned?
        if data.shape[0] != bckg.shape[0]:
            raise ValueError("Background should have same length as the data, but got %d != %d" %
                             (bckg.shape[0], data.shape[0]))

        wl_bckg = spectrum.get_wavelength_per_pixel(bckg)
        # Mismatched wavelengths are tolerated, but reported
        if not numpy.allclose(wl_bckg, wl_data):
            logging.warning("Spectrum background is between %g->%g nm, "
                            "while the spectrum is between %g->%g nm.",
                            wl_bckg[0] * 1e9, wl_bckg[-1] * 1e9,
                            wl_data[0] * 1e9, wl_data[-1] * 1e9)

        data = img.Subtract(data, bckg)

    # We could be more clever if calib has a MD_WL_POLYNOMIAL, but it's very
    # unlikely the calibration is in this form anyway.
    if coef is not None:
        if coef.shape[1:] != (1, 1, 1, 1):
            raise ValueError("coef should have shape C1111")
        wl_coef = spectrum.get_wavelength_per_pixel(coef)

        # Report when the calibration range doesn't cover the data range
        if not (wl_coef[0] <= wl_data[0] and wl_coef[-1] >= wl_data[-1]):
            logging.warning("Spectrum efficiency compensation is only between "
                            "%g->%g nm, while the spectrum is between %g->%g nm.",
                            wl_coef[0] * 1e9, wl_coef[-1] * 1e9,
                            wl_data[0] * 1e9, wl_data[-1] * 1e9)

        # Resample the calibration onto the data wavelengths (border values
        # are extended outside the calibrated range by numpy.interp)
        compensation = numpy.interp(wl_data, wl_coef, coef[:, 0, 0, 0, 0])
        compensation.shape += (1, 1, 1, 1)  # put TZYX dims

        # Compensate the data
        data = data * compensation  # will keep metadata from data

    return data
Esempio n. 5
0
    def _on_new_data(self, data):
        """
        Called when a new data is available (in a live stream)
        data (1D DataArray)
        """
        # Empty data => wipe the display
        if not data.size:
            self.clear()
            self.Refresh()
            return

        # TODO: factorize with get_spectrum_range() for static stream?
        try:
            spectrum_range = spectrum.get_wavelength_per_pixel(data)
            unit_x = "m"
        except (ValueError, KeyError):
            # useless polynomial => just show pixels values (ex: -50 -> +50 px)
            half = data.shape[0] // 2
            spectrum_range = range(half - data.shape[0] + 1, half + 1)
            unit_x = "px"

        self.canvas.set_1d_data(spectrum_range, data, unit_x)

        self.bottom_legend.unit = unit_x
        self.bottom_legend.range = (spectrum_range[0], spectrum_range[-1])
        self.left_legend.range = (min(data), max(data))
        self.Refresh()
Esempio n. 6
0
    def _on_new_data(self, data):
        """
        Called when a new data is available (in a live stream)
        data (1D DataArray)
        """
        if data.size:
            # TODO: factorize with get_spectrum_range() for static stream?
            try:
                rng = spectrum.get_wavelength_per_pixel(data)
                unit_x = "m"
            except (ValueError, KeyError):
                # No usable wavelength metadata => show pixel indices,
                # centred around 0 (ex: -50 -> +50 px)
                half = data.shape[0] // 2
                rng = range(half - data.shape[0] + 1, half + 1)
                unit_x = "px"

            self.canvas.set_1d_data(rng, data, unit_x)

            self.bottom_legend.unit = unit_x
            self.bottom_legend.range = (rng[0], rng[-1])
            self.left_legend.range = (min(data), max(data))
        else:
            self.clear()
        self.Refresh()
Esempio n. 7
0
    def get_spectrum_range(self):
        """ Return the wavelength for each pixel of a (complete) spectrum

        returns (list of numbers or None): one wavelength per spectrum pixel.
          Values are in meters, unless the spectrum cannot be determined, in
          which case integers representing pixels index is returned.
          If no data is available, None is returned.
                (str): unit of spectrum range
        """
        data = self._calibrated

        try:
            wll = spectrum.get_wavelength_per_pixel(data)
        except (ValueError, KeyError):
            # No usable wavelength metadata => pixel indices centred on 0
            # (ex: -50 -> +50 px)
            half = data.shape[0] // 2
            return range(half - data.shape[0] + 1, half + 1), "px"
        return wll, "m"
Esempio n. 8
0
    def get_spectrum_range(self):
        """
        Return the wavelength for each pixel of a (complete) spectrum
        returns (list of numbers or None): one wavelength per spectrum pixel.
          Values are in meters, unless the spectrum cannot be determined, in
          which case integers representing pixels index is returned.
          If no data is available, None is returned.
        """
        # TODO return unit too? (i.e., m or px)
        data = self._calibrated

        try:
            wll = spectrum.get_wavelength_per_pixel(data)
        except (ValueError, KeyError):
            # No usable wavelength metadata => pixel indices centred on 0
            # (ex: -50 -> +50 px)
            half = data.shape[0] // 2
            wll = range(half - data.shape[0] + 1, half + 1)
        return wll
Esempio n. 9
0
 def test_wl_polynomial(self):
     """
     get_wavelength_per_pixel() should derive one wavelength per C pixel
     from the MD_WL_POLYNOMIAL metadata, starting at the polynomial offset.
     """
     shape = (220, 1, 1, 500, 400)
     dtype = numpy.dtype("uint16")
     metadata = {model.MD_SW_VERSION: "1.0-test",
                 model.MD_HW_NAME: "fake spec",
                 model.MD_DESCRIPTION: "test3d",
                 model.MD_ACQ_DATE: time.time(),
                 model.MD_BPP: 12,
                 model.MD_BINNING: (1, 1),  # px, px
                 model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
                 model.MD_WL_POLYNOMIAL: [500e-9, 1e-9],  # m, m/px: wl polynomial
                 model.MD_POS: (1e-3, -30e-3),  # m
                 model.MD_EXP_TIME: 1.2,  # s
                 }
     da = model.DataArray(numpy.zeros(shape, dtype), metadata)

     wl = spectrum.get_wavelength_per_pixel(da)
     # One wavelength per pixel along the C dimension
     self.assertEqual(len(wl), shape[0])
     # First pixel sits at the polynomial's constant term
     self.assertEqual(wl[0], metadata[model.MD_WL_POLYNOMIAL][0])
Esempio n. 10
0
def export(filename, data):
    '''
    Write a CSV file:
        - If the given data is AR data then just dump the phi/data array
        - If the given data is spectrum data write it as series of wavelength/intensity
    filename (unicode): filename of the file to create (including path).
    data (model.DataArray): the data to export.
       Metadata is taken directly from the DA object.
    raises:
        IOError in case the spectrum does not contain wavelength metadata.
    '''
    if data.shape[0] > 1 and numpy.prod(data.shape) == data.shape[0]:
        logging.debug("Exporting spectrum data to CSV")
        try:
            spectrum_range = spectrum.get_wavelength_per_pixel(data)
        except Exception:
            # corner case where spectrum range is not available in metadata
            logging.info("Exporting spectrum without wavelength information")
            spectrum_range = None

        if spectrum_range is not None:
            # turn range to nm
            spectrum_tuples = [(s * 1e9, d) for s, d in zip(spectrum_range, data)]
            headers = ['# wavelength (nm)', 'intensity']
        else:
            spectrum_tuples = data.reshape(data.shape[0], 1)
            headers = ['# intensity']

        with open(filename, 'w') as fd:
            csv_writer = csv.writer(fd)
            csv_writer.writerow(headers)
            csv_writer.writerows(spectrum_tuples)
    elif data.ndim == 2 and all(s >= 2 for s in data.shape):
        logging.debug("Exporting AR data to CSV")
        # Data should be in the form of (Y+1, X+1), with the first row and colum the angles
        with open(filename, 'w') as fd:
            csv_writer = csv.writer(fd)
            # Set the 'header' in the 0,0 element
            first_row = ['theta\phi(rad)'] + [d for d in data[0, 1:]]
            csv_writer.writerow(first_row)
            # dump the array
            csv_writer.writerows(data[1:, :])
Esempio n. 11
0
    def test_wl_list(self):
        """
        get_wavelength_per_pixel() should return the MD_WL_LIST metadata as-is.
        """
        shape = (220, 1, 1, 50, 400)
        dtype = numpy.dtype("uint16")
        wl_orig = (400e-9 + numpy.array(range(shape[0])) * 10e-9).tolist()
        metadata = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake spec",
            model.MD_DESCRIPTION: "test3d",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_WL_LIST: wl_orig,
            model.MD_POS: (1e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
        }
        da = model.DataArray(numpy.zeros(shape, dtype), metadata)

        wl = spectrum.get_wavelength_per_pixel(da)
        # One wavelength per C pixel, identical to the provided list
        self.assertEqual(len(wl), shape[0])
        self.assertEqual(wl, wl_orig)
Esempio n. 12
0
    def __init__(self, name, image):
        """
        name (string)
        image (model.DataArray of shape (CYX) or (C11YX)). The metadata
        MD_WL_POLYNOMIAL should be included in order to associate the C to a
        wavelength.
        raises NotImplementedError: if the image is not 3 dimensional, nor
          5 dimensional with T = Z = 1.
        """
        self._calibrated = None  # just for the _updateDRange to not complain
        Stream.__init__(self, name, None, None, None)
        # Spectrum stream has in addition to normal stream:
        #  * information about the current bandwidth displayed (avg. spectrum)
        #  * coordinates of 1st point (1-point, line)
        #  * coordinates of 2nd point (line)

        if len(image.shape) == 3:
            # force 5D
            image = image[:, numpy.newaxis, numpy.newaxis, :, :]
        elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
            logging.error("Cannot handle data of shape %s", image.shape)
            raise NotImplementedError("SpectrumStream needs a cube data")

        # ## this is for "average spectrum" projection
        try:
            # cached list of wavelength for each pixel pos
            self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
        except (ValueError, KeyError):
            # useless polynomial => just show pixels values (ex: -50 -> +50 px)
            # TODO: try to make them always int?
            max_bw = image.shape[0] // 2
            min_bw = (max_bw - image.shape[0]) + 1
            self._wl_px_values = range(min_bw, max_bw + 1)
            assert (len(self._wl_px_values) == image.shape[0])
            unit_bw = "px"  # no wavelength info => bandwidth in pixel indices
            cwl = (max_bw + min_bw) // 2
            width = image.shape[0] // 12
        else:
            # wavelength info available => bandwidth in meters
            min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
            unit_bw = "m"
            cwl = (max_bw + min_bw) / 2
            width = (max_bw - min_bw) / 12

        # TODO: allow to pass the calibration data as argument to avoid
        # recomputing the data just after init?
        # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
        self.efficiencyCompensation = model.VigilantAttribute(
            None, setter=self._setEffComp)

        # The background data (typically, an acquisition without ebeam).
        # It is subtracted from the acquisition data.
        # If set to None, a simple baseline background value is subtracted.
        self.background = model.VigilantAttribute(None,
                                                  setter=self._setBackground)

        # low/high values of the spectrum displayed
        self.spectrumBandwidth = model.TupleContinuous(
            (cwl - width, cwl + width),
            range=((min_bw, min_bw), (max_bw, max_bw)),
            unit=unit_bw,
            cls=(int, long, float))

        # Whether the (per bandwidth) display should be split intro 3 sub-bands
        # which are applied to RGB
        self.fitToRGB = model.BooleanVA(False)

        self._drange = None

        # This attribute is used to keep track of any selected pixel within the
        # data for the display of a spectrum
        self.selected_pixel = model.TupleVA((None, None))  # int, int

        # first point, second point in pixels. It must be 2 elements long.
        self.selected_line = model.ListVA([(None, None), (None, None)],
                                          setter=self._setLine)

        # The thickness of a point of a line (shared).
        # A point of width W leads to the average value between all the pixels
        # which are within W/2 from the center of the point.
        # A line of width W leads to a 1D spectrum taking into account all the
        # pixels which fit on an orthogonal line to the selected line at a
        # distance <= W/2.
        self.width = model.IntContinuous(1, [1, 50], unit="px")

        # Recompute the projection whenever the settings change
        self.fitToRGB.subscribe(self.onFitToRGB)
        self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
        self.efficiencyCompensation.subscribe(self._onCalib)
        self.background.subscribe(self._onCalib)

        self.raw = [image
                    ]  # for compatibility with other streams (like saving...)
        self._calibrated = image  # the raw data after calibration

        self._updateDRange()
        self._updateHistogram()
        self._updateImage()
Esempio n. 13
0
def apply_spectrum_corrections(data, bckg=None, coef=None):
    """
    Apply the background correction and the spectrum efficiency compensation
    factors to the given data if applicable.
    If the wavelength of the calibration doesn't cover the whole data wavelength,
    the missing wavelength is filled by the same value as the border. Wavelength
    in-between points is linearly interpolated.
    :param data: (DataArray of at least 5 dims) The original data.
            Spectrum data can be of two types:
            - mirror (no wl info)
            - grating (wl info)
        Temporal spectrum data can be of four types:
            - mirror + focus mode (no wl and time info)
            - mirror + operate mode (no wl but time info)
            - grating + focus mode (wl but no time info)
            - grating + operate mode (wl and time info)
        Chronograph data can be only of one type, with no wl but time info. So far no bg correction is
            supported for chronograph data. Spectrum efficiency correction do not apply for this type of data.
    :param bckg: (None or DataArray of at least 5 dims) The background data, with
        CTZYX = C1111 (spectrum), CTZYX = CT111 (temporal spectrum) or CTZYX = 1T111 (time correlator).
    :param coef: (None or DataArray of at least 5 dims) The coefficient data, with CTZXY = C1111.
    :returns: (DataArray) Same shape as original data. Can have dtype=float.
    :raises ValueError: if the data is chronograph data, if the background does
        not match the data (shape, streak mode, time range, wavelength info),
        or if the coefficient data is not usable with the data.
    """

    # handle time correlator data (chronograph) data
    # -> no spectrum efficiency compensation and bg correction supported
    # (C dim of length <= 1 but T dim > 1 identifies a chronograph)
    if data.shape[-5] <= 1 and data.shape[-4] > 1:
        raise ValueError(
            "Do not support any background correction or spectrum efficiency "
            "compensation for time correlator (chronograph) data")

    # TODO: use MD_BASELINE as a fallback?
    if bckg is not None:

        # Check that the bg matches the data.
        # TODO: support if the data is binned?
        if data.shape[0:2] != bckg.shape[0:2]:
            raise ValueError(
                "Background should have the same shape as the data, but got %s != %s"
                % (bckg.shape[0:2], data.shape[0:2]))

        # If temporal spectrum data, check for time range and streak mode.
        if model.MD_STREAK_MODE in data.metadata.keys():
            # Check that the data and the bg image were acquired with the same streak mode.
            if data.metadata[model.MD_STREAK_MODE] != bckg.metadata[
                    model.MD_STREAK_MODE]:
                raise ValueError(
                    "Background should have the same streak mode as the data, but got %d != %d"
                    % (bckg.metadata[model.MD_STREAK_MODE],
                       data.metadata[model.MD_STREAK_MODE]))
            # Check that the time range of the data matches with the bg image.
            if data.metadata[model.MD_STREAK_TIMERANGE] != bckg.metadata[
                    model.MD_STREAK_TIMERANGE]:
                raise ValueError(
                    "Background should have the same time range as the data, but got %s != %s"
                    % (bckg.metadata[model.MD_STREAK_TIMERANGE],
                       data.metadata[model.MD_STREAK_TIMERANGE]))

        # Check if we have any wavelength information.
        if model.MD_WL_LIST not in data.metadata:
            # temporal spectrum data, but acquired in mirror mode (with/without time info)
            # spectrum data, but acquired in mirror mode

            # check that bg data also doesn't contain wl info
            if model.MD_WL_LIST in bckg.metadata:
                raise ValueError(
                    "Found MD_WL_LIST metadata in background image, but "
                    "data does not provide any wavelength information")
            data = img.Subtract(data, bckg)

        else:
            # temporal spectrum with wl info (with/without time info)
            # spectrum data with wl info

            # Need to get the calibration data for each wavelength of the data
            wl_data = spectrum.get_wavelength_per_pixel(data)

            # Check that bg data also contains wl info.
            try:
                wl_bckg = spectrum.get_wavelength_per_pixel(bckg)
            except KeyError:
                raise ValueError(
                    "Found no spectrum metadata (MD_WL_LIST) in the background image."
                )

            # Warn if not the same wavelength
            # (tolerated: the bg is subtracted pixel-wise regardless)
            if not numpy.allclose(wl_bckg, wl_data):
                logging.warning(
                    "Spectrum background is between %g->%g nm, "
                    "while the spectrum is between %g->%g nm.",
                    wl_bckg[0] * 1e9, wl_bckg[-1] * 1e9, wl_data[0] * 1e9,
                    wl_data[-1] * 1e9)

            data = img.Subtract(data, bckg)

    if coef is not None:
        # Check if we have any wavelength information in data.
        if model.MD_WL_LIST not in data.metadata:
            raise ValueError(
                "Cannot apply spectrum correction as "
                "data does not provide any wavelength information.")
        if coef.shape[1:] != (1, 1, 1, 1):
            raise ValueError(
                "Spectrum efficiency compensation should have shape C1111.")

        # Need to get the calibration data for each wavelength of the data
        wl_data = spectrum.get_wavelength_per_pixel(data)
        wl_coef = spectrum.get_wavelength_per_pixel(coef)

        # Warn if the calibration is not enough for the data
        if wl_coef[0] > wl_data[0] or wl_coef[-1] < wl_data[-1]:
            logging.warning(
                "Spectrum efficiency compensation is only between "
                "%g->%g nm, while the spectrum is between %g->%g nm.",
                wl_coef[0] * 1e9, wl_coef[-1] * 1e9, wl_data[0] * 1e9,
                wl_data[-1] * 1e9)

        # Interpolate the calibration data for each wl_data
        # (numpy.interp extends the border values outside the calibrated range)
        calib_fitted = numpy.interp(wl_data, wl_coef, coef[:, 0, 0, 0, 0])
        calib_fitted.shape += (1, 1, 1, 1)  # put TZYX dims
        # Compensate the data
        data = data * calib_fitted  # will keep metadata from data

    return data
Esempio n. 14
0
    def __init__(self, name, image):
        """
        name (string)
        image (model.DataArray(Shadow) of shape (CYX) or (C11YX)). The metadata
        MD_WL_POLYNOMIAL or MD_WL_LIST should be included in order to associate the C to a
        wavelength.
        raises NotImplementedError: if the image is not 3 dimensional, nor
          5 dimensional with T = Z = 1.
        """
        # Spectrum stream has in addition to normal stream:
        #  * information about the current bandwidth displayed (avg. spectrum)
        #  * coordinates of 1st point (1-point, line)
        #  * coordinates of 2nd point (line)

        # TODO: need to handle DAS properly, in case it's tiled (in XY), to avoid
        # loading too much data in memory.
        # Ensure the data is a DataArray, as we don't handle (yet) DAS
        if isinstance(image, model.DataArrayShadow):
            image = image.getData()

        if len(image.shape) == 3:
            # force 5D
            image = image[:, numpy.newaxis, numpy.newaxis, :, :]
        elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
            logging.error("Cannot handle data of shape %s", image.shape)
            raise NotImplementedError("SpectrumStream needs a cube data")

        # This is for "average spectrum" projection
        try:
            # cached list of wavelength for each pixel pos
            self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
        except (ValueError, KeyError):
            # useless polynomial => just show pixels values (ex: -50 -> +50 px)
            # TODO: try to make them always int?
            max_bw = image.shape[0] // 2
            min_bw = (max_bw - image.shape[0]) + 1
            self._wl_px_values = range(min_bw, max_bw + 1)
            assert(len(self._wl_px_values) == image.shape[0])
            unit_bw = "px"  # no wavelength info => bandwidth in pixel indices
            cwl = (max_bw + min_bw) // 2
            width = image.shape[0] // 12
        else:
            # wavelength info available => bandwidth in meters
            min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
            unit_bw = "m"
            cwl = (max_bw + min_bw) / 2
            width = (max_bw - min_bw) / 12

        # TODO: allow to pass the calibration data as argument to avoid
        # recomputing the data just after init?
        # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
        self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)

        # The background data (typically, an acquisition without e-beam).
        # It is subtracted from the acquisition data.
        # If set to None, a simple baseline background value is subtracted.
        self.background = model.VigilantAttribute(None, setter=self._setBackground)

        # low/high values of the spectrum displayed
        self.spectrumBandwidth = model.TupleContinuous(
                                    (cwl - width, cwl + width),
                                    range=((min_bw, min_bw), (max_bw, max_bw)),
                                    unit=unit_bw,
                                    cls=(int, long, float))

        # Whether the (per bandwidth) display should be split intro 3 sub-bands
        # which are applied to RGB
        self.fitToRGB = model.BooleanVA(False)

        # This attribute is used to keep track of any selected pixel within the
        # data for the display of a spectrum
        self.selected_pixel = model.TupleVA((None, None))  # int, int

        # first point, second point in pixels. It must be 2 elements long.
        self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)

        # Peak method index, None if spectrum peak fitting curve is not displayed
        self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})

        # The thickness of a point or a line (shared).
        # A point of width W leads to the average value between all the pixels
        # which are within W/2 from the center of the point.
        # A line of width W leads to a 1D spectrum taking into account all the
        # pixels which fit on an orthogonal line to the selected line at a
        # distance <= W/2.
        self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")

        # Recompute the projection whenever the settings change
        self.fitToRGB.subscribe(self.onFitToRGB)
        self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
        self.efficiencyCompensation.subscribe(self._onCalib)
        self.background.subscribe(self._onCalib)
        self.selectionWidth.subscribe(self._onSelectionWidth)

        self._calibrated = image  # the raw data after calibration
        super(StaticSpectrumStream, self).__init__(name, [image])

        # Automatically select point/line if data is small (can only be done
        # after .raw is set)
        if image.shape[-2:] == (1, 1):  # Only one point => select it immediately
            self.selected_pixel.value = (0, 0)
        elif image.shape[-2] == 1:  # Horizontal line => select line immediately
            self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
        elif image.shape[-1] == 1:  # Vertical line => select line immediately
            self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]
0
def _add_image_info(group, dataset, image, rgb=False):
    """
    Adds the basic metadata information about an image (scale and offset).

    group (HDF Group): the group that contains the dataset
    dataset (HDF Dataset): the image dataset
    image (DataArray >= 2D): image with metadata, the last 2 dimensions are Y and X (H,W)
    rgb (bool): If True, will consider the dimension of length 3 as channel
    """
    # Note: DimensionScale support is only part of h5py since v2.1
    
    # Dimensions: label every HDF5 dimension so readers know the axis order
    if rgb:
        # expect 3 dimensions, with one of len==3 (=> C)
        left_dim = ["X", "Y"]
        for i, d in enumerate(dataset.dims):
            if image.shape[i] == 3:
                dname = "C"
            else:
                # pop() takes from the end of the list => the first non-C
                # dimension encountered is labelled "Y", the next one "X"
                dname = left_dim.pop()
            d.label = dname
        l = 2 # trick to force to only put info for X and Y
        # NOTE(review): with l forced to 2, the attach_scale calls below use
        # dims[0]/dims[1], which are X/Y only if C is the *last* dimension —
        # confirm against the callers' RGB data layout.
    else:
        # The order of the dimension is reversed (the slowest changing is last)
        l = len(dataset.dims)
        dataset.dims[l - 1].label = "X"
        dataset.dims[l - 2].label = "Y"
        # support more dimensions if available:
        if l >= 3:
            dataset.dims[l - 3].label = "Z"
        if l >= 4:
            dataset.dims[l - 4].label = "T"
        if l >= 5:
            dataset.dims[l - 5].label = "C"

    # Offset: position of the image (centre), in meters
    if model.MD_POS in image.metadata:
        pos = image.metadata[model.MD_POS]
        group["XOffset"] = pos[0]
        _h5svi_set_state(group["XOffset"], ST_REPORTED)
        group["XOffset"].attrs["UNIT"] = "m" # our extension
        group["YOffset"] = pos[1]
        _h5svi_set_state(group["YOffset"], ST_REPORTED)
        group["YOffset"].attrs["UNIT"] = "m" # our extension

    # Time
    # TODO:
    # Surprisingly (for such a usual type), time storage is a mess in HDF5.
    # The documentation states that you can use H5T_TIME, but it is
    # "is not supported. If H5T_TIME is used, the resulting data will be readable
    # and modifiable only on the originating computing platform; it will not be
    # portable to other platforms.". It appears many format are allowed.
    # In addition in h5py, it's indicated as "deprecated" (although it seems
    # it was added in the latest version of HDF5).
    # Moreover, the only types available are 32 and 64 bits integers as number
    # of seconds since epoch. No past, no milliseconds, no time-zone.
    # So there are other proposals like in in F5
    # (http://sciviz.cct.lsu.edu/papers/2007/F5TimeSemantics.pdf) to represent
    # time with a float, a unit and an offset.
    # KNMI uses a string like this: DD-MON-YYYY;HH:MM:SS.sss.
    # (cf http://www.knmi.nl/~beekhuis/documents/publicdocs/ir2009-01_hdftag36.pdf)
    # So, to not solve anything, we save the date as a float representing the
    # Unix time. At least it makes Huygens happy.
    if model.MD_ACQ_DATE in image.metadata:
        # For a ISO 8601 string:
#        ad = datetime.utcfromtimestamp(image.metadata[model.MD_ACQ_DATE])
#        adstr = ad.strftime("%Y-%m-%dT%H:%M:%S.%f")
#        group["TOffset"] = adstr
        group["TOffset"] = image.metadata[model.MD_ACQ_DATE]
        _h5svi_set_state(group["TOffset"], ST_REPORTED)
    else:
        # No acquisition date in the metadata => use "now" as a default
        group["TOffset"] = time.time()
        _h5svi_set_state(group["TOffset"], ST_DEFAULT)
    group["TOffset"].attrs["UNIT"] = "s" # our extension

    # Scale: physical size of one pixel, attached as HDF5 dimension scales
    if model.MD_PIXEL_SIZE in image.metadata:
        # DimensionScales are not clearly explained in the specification to
        # understand what they are supposed to represent. Surprisingly, there
        # is no official way to attach a unit.
        # Huygens seems to consider it's in m
        pxs = image.metadata[model.MD_PIXEL_SIZE]
        group["DimensionScaleX"] = pxs[0]
        group["DimensionScaleX"].attrs["UNIT"] = "m" # our extension
        _h5svi_set_state(group["DimensionScaleX"], ST_REPORTED)
        group["DimensionScaleY"] = pxs[1]
        group["DimensionScaleY"].attrs["UNIT"] = "m"
        _h5svi_set_state(group["DimensionScaleY"], ST_REPORTED)
        # Not clear what's the relation between this name and the label
        # NOTE(review): dims.create_scale was deprecated in h5py 2.10 in
        # favour of Dataset.make_scale — confirm minimum supported h5py.
        dataset.dims.create_scale(group["DimensionScaleX"], "X")
        dataset.dims.create_scale(group["DimensionScaleY"], "Y")
        dataset.dims[l - 1].attach_scale(group["DimensionScaleX"])
        dataset.dims[l - 2].attach_scale(group["DimensionScaleY"])

    # Unknown data, but SVI needs them to take the scales into consideration
    if l >= 3:
        # Default (not measured) Z information
        group["ZOffset"] = 0.0
        _h5svi_set_state(group["ZOffset"], ST_DEFAULT)
        group["DimensionScaleZ"] = 1e-3 # m
        group["DimensionScaleZ"].attrs["UNIT"] = "m"
        dataset.dims.create_scale(group["DimensionScaleZ"], "Z")
        _h5svi_set_state(group["DimensionScaleZ"], ST_DEFAULT)
        dataset.dims[l - 3].attach_scale(group["DimensionScaleZ"])

        # Put here to please Huygens
        # Seems to be the coverslip position, ie, the lower and upper glass of
        # the sample. Not clear what's the relation with ZOffset.
        group["PrimaryGlassMediumInterfacePosition"] = 0.0 # m?
        _h5svi_set_state(group["PrimaryGlassMediumInterfacePosition"], ST_DEFAULT)
        group["SecondaryGlassMediumInterfacePosition"] = 1.0 # m?
        _h5svi_set_state(group["SecondaryGlassMediumInterfacePosition"], ST_DEFAULT)

    if l >= 4:
        # Default (not measured) time scale of 1 s per pixel
        group["DimensionScaleT"] = 1.0 # s
        group["DimensionScaleT"].attrs["UNIT"] = "s"
        # Not clear what's the relation between this name and the label
        dataset.dims.create_scale(group["DimensionScaleT"], "T")
        _h5svi_set_state(group["DimensionScaleT"], ST_DEFAULT)
        dataset.dims[l - 4].attach_scale(group["DimensionScaleT"])

    # Wavelength (for spectrograms)
    if (l >= 5 and
        set(image.metadata.keys()) & {model.MD_WL_LIST, model.MD_WL_POLYNOMIAL}):
        try:
            # polynomial of degree = 2 => linear, so use compact notation
            if (model.MD_WL_POLYNOMIAL in image.metadata and
                len(image.metadata[model.MD_WL_POLYNOMIAL]) == 2):
                pn = image.metadata[model.MD_WL_POLYNOMIAL]
                group["COffset"] = pn[0]
                _h5svi_set_state(group["COffset"], ST_REPORTED)
                group["DimensionScaleC"] = pn[1] # m
            else:
                wll = spectrum.get_wavelength_per_pixel(image)
                # list or polynomial of degree > 2 => store the values of each
                # pixel index explicitly. We follow another way to express
                # scaling in HDF5.
                group["DimensionScaleC"] = wll # m

            group["DimensionScaleC"].attrs["UNIT"] = "m"
            dataset.dims.create_scale(group["DimensionScaleC"], "C")
            _h5svi_set_state(group["DimensionScaleC"], ST_REPORTED)
            dataset.dims[l - 5].attach_scale(group["DimensionScaleC"])
        except Exception:
            # Best-effort: wavelength info is optional, never fail the save
            logging.warning("Failed to record wavelength information, "
                            "it will not be saved.")
Esempio n. 16
0
def _add_image_info(group, dataset, image):
    """
    Adds the basic metadata information about an image (scale, offset, and rotation).

    group (HDF Group): the group that contains the dataset
    dataset (HDF Dataset): the image dataset
    image (DataArray >= 2D): image with metadata, the last 2 dimensions are Y and X (H,W)
    """
    # Note: DimensionScale support is only part of h5py since v2.1
    
    # Dimensions
    l = image.ndim
    # "CTZYX"[-l::] keeps the *last* l letters, so e.g. a 3D array
    # defaults to "ZYX" when MD_DIMS is not provided
    dims = image.metadata.get(model.MD_DIMS, "CTZYX"[-l::])

    # Label each HDF5 dimension with its axis letter
    for i, d in enumerate(dataset.dims):
        d.label = dims[i]

    # FIXME: We map the position of the center to X/YOffset. That's quite
    # contrary to the feeling that the position of a pixel should be read as
    # XOffset + i * DimensionScaleX. => It would be more logical to set
    # X/YOffset as the position of pixel 0,0. (The drawback being that we need
    # to have a precise idea of the size of a pixel to position the image)

    # Offset: position of the image (centre), in meters
    if model.MD_POS in image.metadata:
        pos = image.metadata[model.MD_POS]
        group["XOffset"] = pos[0]
        _h5svi_set_state(group["XOffset"], ST_REPORTED)
        group["XOffset"].attrs["UNIT"] = "m" # our extension
        group["YOffset"] = pos[1]
        _h5svi_set_state(group["YOffset"], ST_REPORTED)
        group["YOffset"].attrs["UNIT"] = "m" # our extension

    # Time
    # TODO:
    # Surprisingly (for such a usual type), time storage is a mess in HDF5.
    # The documentation states that you can use H5T_TIME, but it is
    # "is not supported. If H5T_TIME is used, the resulting data will be readable
    # and modifiable only on the originating computing platform; it will not be
    # portable to other platforms.". It appears many format are allowed.
    # In addition in h5py, it's indicated as "deprecated" (although it seems
    # it was added in the latest version of HDF5).
    # Moreover, the only types available are 32 and 64 bits integers as number
    # of seconds since epoch. No past, no milliseconds, no time-zone.
    # So there are other proposals like in in F5
    # (http://sciviz.cct.lsu.edu/papers/2007/F5TimeSemantics.pdf) to represent
    # time with a float, a unit and an offset.
    # KNMI uses a string like this: DD-MON-YYYY;HH:MM:SS.sss.
    # (cf http://www.knmi.nl/~beekhuis/documents/publicdocs/ir2009-01_hdftag36.pdf)
    # So, to not solve anything, we save the date as a float representing the
    # Unix time. At least it makes Huygens happy.
    if model.MD_ACQ_DATE in image.metadata:
        # For a ISO 8601 string:
#        ad = datetime.utcfromtimestamp(image.metadata[model.MD_ACQ_DATE])
#        adstr = ad.strftime("%Y-%m-%dT%H:%M:%S.%f")
#        group["TOffset"] = adstr
        group["TOffset"] = image.metadata[model.MD_ACQ_DATE]
        _h5svi_set_state(group["TOffset"], ST_REPORTED)
    else:
        # No acquisition date in the metadata => use "now" as a default
        group["TOffset"] = time.time()
        _h5svi_set_state(group["TOffset"], ST_DEFAULT)
    group["TOffset"].attrs["UNIT"] = "s" # our extension

    # Scale: physical size of one pixel, attached as HDF5 dimension scales
    if model.MD_PIXEL_SIZE in image.metadata:
        # DimensionScales are not clearly explained in the specification to
        # understand what they are supposed to represent. Surprisingly, there
        # is no official way to attach a unit.
        # Huygens seems to consider it's in m
        xpos = dims.index("X")
        ypos = dims.index("Y")
        pxs = image.metadata[model.MD_PIXEL_SIZE]
        group["DimensionScaleX"] = pxs[0]
        group["DimensionScaleX"].attrs["UNIT"] = "m" # our extension
        _h5svi_set_state(group["DimensionScaleX"], ST_REPORTED)
        group["DimensionScaleY"] = pxs[1]
        group["DimensionScaleY"].attrs["UNIT"] = "m"
        _h5svi_set_state(group["DimensionScaleY"], ST_REPORTED)
        # Not clear what's the relation between this name and the label
        # NOTE(review): dims.create_scale was deprecated in h5py 2.10 in
        # favour of Dataset.make_scale — confirm minimum supported h5py.
        dataset.dims.create_scale(group["DimensionScaleX"], "X")
        dataset.dims.create_scale(group["DimensionScaleY"], "Y")
        dataset.dims[xpos].attach_scale(group["DimensionScaleX"])
        dataset.dims[ypos].attach_scale(group["DimensionScaleY"])

    # Unknown data, but SVI needs them to take the scales into consideration
    if "Z" in dims:
        # Default (not measured) Z information
        zpos = dims.index("Z")
        group["ZOffset"] = 0.0
        _h5svi_set_state(group["ZOffset"], ST_DEFAULT)
        group["DimensionScaleZ"] = 1e-3 # m
        group["DimensionScaleZ"].attrs["UNIT"] = "m"
        dataset.dims.create_scale(group["DimensionScaleZ"], "Z")
        _h5svi_set_state(group["DimensionScaleZ"], ST_DEFAULT)
        dataset.dims[zpos].attach_scale(group["DimensionScaleZ"])

        # Put here to please Huygens
        # Seems to be the coverslip position, ie, the lower and upper glass of
        # the sample. Not clear what's the relation with ZOffset.
        group["PrimaryGlassMediumInterfacePosition"] = 0.0 # m?
        _h5svi_set_state(group["PrimaryGlassMediumInterfacePosition"], ST_DEFAULT)
        group["SecondaryGlassMediumInterfacePosition"] = 1.0 # m?
        _h5svi_set_state(group["SecondaryGlassMediumInterfacePosition"], ST_DEFAULT)

    if "T" in dims:
        # Default (not measured) time scale of 1 s per pixel
        tpos = dims.index("T")
        group["DimensionScaleT"] = 1.0 # s
        group["DimensionScaleT"].attrs["UNIT"] = "s"
        # Not clear what's the relation between this name and the label
        dataset.dims.create_scale(group["DimensionScaleT"], "T")
        _h5svi_set_state(group["DimensionScaleT"], ST_DEFAULT)
        dataset.dims[tpos].attach_scale(group["DimensionScaleT"])

    # Wavelength (for spectrograms)
    if ("C" in dims and
        set(image.metadata.keys()) & {model.MD_WL_LIST, model.MD_WL_POLYNOMIAL}):
        try:
            # polynomial of degree = 2 => linear, so use compact notation
            if (model.MD_WL_POLYNOMIAL in image.metadata and
                len(image.metadata[model.MD_WL_POLYNOMIAL]) == 2):
                pn = image.metadata[model.MD_WL_POLYNOMIAL]
                group["COffset"] = pn[0]
                _h5svi_set_state(group["COffset"], ST_REPORTED)
                group["DimensionScaleC"] = pn[1] # m
            else:
                wll = spectrum.get_wavelength_per_pixel(image)
                # list or polynomial of degree > 2 => store the values of each
                # pixel index explicitly. We follow another way to express
                # scaling in HDF5.
                group["DimensionScaleC"] = wll # m

            group["DimensionScaleC"].attrs["UNIT"] = "m"
            dataset.dims.create_scale(group["DimensionScaleC"], "C")
            _h5svi_set_state(group["DimensionScaleC"], ST_REPORTED)
            cpos = dims.index("C")
            dataset.dims[cpos].attach_scale(group["DimensionScaleC"])
        except Exception:
            # Best-effort: wavelength info is optional, never fail the save
            logging.warning("Failed to record wavelength information, "
                            "it will not be saved.")

    # Rotation (3-scalar): X,Y,Z of the rotation vector, with the norm being the
    # angle in radians (according to the right-hand rule)
    if model.MD_ROTATION in image.metadata:
        # In Odemis we only support 2D rotation, so just around Z
        group["Rotation"] = (0, 0, image.metadata[model.MD_ROTATION])
        _h5svi_set_state(group["Rotation"], ST_REPORTED)
        group["Rotation"].attrs["UNIT"] = "rad"
Esempio n. 17
0
    def __init__(self, name, image):
        """
        name (string)
        image (model.DataArray of shape (CYX) or (C11YX)). The metadata
        MD_WL_POLYNOMIAL or MD_WL_LIST should be included in order to associate the C to a
        wavelength.
        """
        # Spectrum stream has in addition to normal stream:
        #  * information about the current bandwidth displayed (avg. spectrum)
        #  * coordinates of 1st point (1-point, line)
        #  * coordinates of 2nd point (line)

        if len(image.shape) == 3:
            # force 5D (C11YX) by inserting T and Z dimensions of length 1
            image = image[:, numpy.newaxis, numpy.newaxis, :, :]
        elif len(image.shape) != 5 or image.shape[1:3] != (1, 1):
            logging.error("Cannot handle data of shape %s", image.shape)
            raise NotImplementedError("SpectrumStream needs a cube data")

        # ## this is for "average spectrum" projection
        try:
            # cached list of wavelength for each pixel pos
            self._wl_px_values = spectrum.get_wavelength_per_pixel(image)
        except (ValueError, KeyError):
            # useless polynomial => just show pixels values (ex: -50 -> +50 px)
            # TODO: try to make them always int?
            max_bw = image.shape[0] // 2
            min_bw = (max_bw - image.shape[0]) + 1
            self._wl_px_values = range(min_bw, max_bw + 1)
            assert(len(self._wl_px_values) == image.shape[0])
            unit_bw = "px"
            cwl = (max_bw + min_bw) // 2
            width = image.shape[0] // 12
        else:
            # Wavelength available => centre on the middle of the range,
            # with a default bandwidth of 1/12th of the full range
            min_bw, max_bw = self._wl_px_values[0], self._wl_px_values[-1]
            unit_bw = "m"
            cwl = (max_bw + min_bw) / 2
            width = (max_bw - min_bw) / 12

        # TODO: allow to pass the calibration data as argument to avoid
        # recomputing the data just after init?
        # Spectrum efficiency compensation data: None or a DataArray (cf acq.calibration)
        self.efficiencyCompensation = model.VigilantAttribute(None, setter=self._setEffComp)

        # The background data (typically, an acquisition without e-beam).
        # It is subtracted from the acquisition data.
        # If set to None, a simple baseline background value is subtracted.
        self.background = model.VigilantAttribute(None, setter=self._setBackground)

        # low/high values of the spectrum displayed
        # NOTE(review): "long" only exists on Python 2 — this module appears
        # to target Python 2; confirm before running on Python 3.
        self.spectrumBandwidth = model.TupleContinuous(
                                    (cwl - width, cwl + width),
                                    range=((min_bw, min_bw), (max_bw, max_bw)),
                                    unit=unit_bw,
                                    cls=(int, long, float))

        # Whether the (per bandwidth) display should be split intro 3 sub-bands
        # which are applied to RGB
        self.fitToRGB = model.BooleanVA(False)

        # This attribute is used to keep track of any selected pixel within the
        # data for the display of a spectrum
        self.selected_pixel = model.TupleVA((None, None))  # int, int

        # first point, second point in pixels. It must be 2 elements long.
        self.selected_line = model.ListVA([(None, None), (None, None)], setter=self._setLine)

        # Peak method index, None if spectrum peak fitting curve is not displayed
        self.peak_method = model.VAEnumerated("gaussian", {"gaussian", "lorentzian", None})

        # The thickness of a point or a line (shared).
        # A point of width W leads to the average value between all the pixels
        # which are within W/2 from the center of the point.
        # A line of width W leads to a 1D spectrum taking into account all the
        # pixels which fit on an orthogonal line to the selected line at a
        # distance <= W/2.
        self.selectionWidth = model.IntContinuous(1, [1, 50], unit="px")

        # Subscriptions are set up only after all the VAs exist, so a callback
        # never fires against a partially-initialised stream
        self.fitToRGB.subscribe(self.onFitToRGB)
        self.spectrumBandwidth.subscribe(self.onSpectrumBandwidth)
        self.efficiencyCompensation.subscribe(self._onCalib)
        self.background.subscribe(self._onCalib)
        self.selectionWidth.subscribe(self._onSelectionWidth)

        self._calibrated = image  # the raw data after calibration
        super(StaticSpectrumStream, self).__init__(name, [image])

        # Automatically select point/line if data is small (can only be done
        # after .raw is set)
        if image.shape[-2:] == (1, 1):  # Only one point => select it immediately
            self.selected_pixel.value = (0, 0)
        elif image.shape[-2] == 1:  # Horizontal line => select line immediately
            self.selected_line.value = [(0, 0), (image.shape[-1] - 1, 0)]
        elif image.shape[-1] == 1:  # Vertical line => select line immediately
            self.selected_line.value = [(0, 0), (0, image.shape[-2] - 1)]
Esempio n. 18
0
def export(filename, data):
    '''
    Write a CSV file:
        - If the given data is AR data then just dump the phi/data array
        - If the given data is spectrum data write it as series of wavelength/intensity
    filename (unicode): filename of the file to create (including path).
    data (model.DataArray): the data to export.
       Metadata is taken directly from the DA object.
    raises:
        IOError in case the data is not of a known type (not spectrum or AR).
    '''
    if data.metadata.get(model.MD_ACQ_TYPE, None) == model.MD_AT_SPECTRUM:
        try:
            spectrum_range, unit = spectrum.get_wavelength_per_pixel(data), "nm"
            # Convert from m to nm, more readable in a CSV file
            spectrum_range = [s * 1e9 for s in spectrum_range]
        except Exception:
            # No usable wavelength metadata => fall back to pixel indices,
            # centred around 0 (ex: -50 -> +50 px)
            max_bw = data.shape[0] // 2
            min_bw = (max_bw - data.shape[0]) + 1
            spectrum_range, unit = range(min_bw, max_bw + 1), "px"
        if data.ndim == 1:
            # 1D => one (wavelength, intensity) pair per CSV row
            logging.debug("Exporting spectrum data to CSV")

            if unit == "nm":
                spectrum_tuples = list(zip(spectrum_range, data))
                headers = ['# wavelength (nm)', 'intensity']
            else:
                logging.info("Exporting spectrum without wavelength information")
                spectrum_tuples = data.reshape(data.shape[0], 1)
                headers = ['# intensity']

            # NOTE(review): on Python 3, csv files should be opened with
            # newline='' to avoid doubled line endings on Windows — confirm
            # the supported Python versions before changing.
            with open(filename, 'w') as fd:
                csv_writer = csv.writer(fd)
                csv_writer.writerow(headers)
                csv_writer.writerows(spectrum_tuples)
        elif data.ndim == 2:
            # FIXME: For now it handles the rest of 2d data as spectrum-line
            logging.debug("Exporting spectrum-line data to CSV")

            # attach wavelength as first column
            wavelength_lin = numpy.array(spectrum_range)
            qz_masked = numpy.append(wavelength_lin.reshape(data.shape[0], 1), data, axis=1)
            # attach distance as first row (one value per pixel along the line)
            line_length = data.shape[1] * data.metadata[model.MD_PIXEL_SIZE][1]
            distance_lin = numpy.linspace(0, line_length, data.shape[1])
            distance_lin.shape = (1, distance_lin.shape[0])
            # pad with a 0 in the corner so the row aligns with the extra
            # wavelength column
            distance_lin = numpy.append([[0]], distance_lin, axis=1)
            qz_masked = numpy.append(distance_lin, qz_masked, axis=0)
            data = model.DataArray(qz_masked, data.metadata)

            # Data should be in the form of (Y+1, X+1), with the first row and column the
            # distance_from_origin\wavelength
            with open(filename, 'w') as fd:
                csv_writer = csv.writer(fd)
                # Set the 'header' in the 0,0 element.
                # "\\" (was "\d"): invalid escape sequences are deprecated
                # and will become a SyntaxError; the written text is unchanged.
                first_row = ['wavelength(' + unit + ')\\distance_from_origin(m)'] + [d for d in data[0, 1:]]
                csv_writer.writerow(first_row)
                # dump the array
                csv_writer.writerows(data[1:, :])
        else:
            raise IOError("Unknown type of data to be exported as CSV")
    elif data.metadata.get(model.MD_ACQ_TYPE, None) == model.MD_AT_AR:
        logging.debug("Exporting AR data to CSV")
        # Data should be in the form of (Y+1, X+1), with the first row and column the angles
        with open(filename, 'w') as fd:
            csv_writer = csv.writer(fd)
            # Set the 'header' in the 0,0 element ("\\" was "\p", same text,
            # avoids the deprecated invalid escape sequence)
            first_row = ['theta\\phi(rad)'] + [d for d in data[0, 1:]]
            csv_writer.writerow(first_row)
            # dump the array
            csv_writer.writerows(data[1:, :])
Esempio n. 19
0
def _add_image_info(group, dataset, image, rgb=False):
    """
    Adds the basic metadata information about an image (scale and offset).

    group (HDF Group): the group that contains the dataset
    dataset (HDF Dataset): the image dataset
    image (DataArray >= 2D): image with metadata, the last 2 dimensions are Y and X (H,W)
    rgb (bool): If True, will consider the dimension of length 3 as channel
    """
    # Note: DimensionScale support is only part of h5py since v2.1

    # Dimensions: label every HDF5 dimension so readers know the axis order
    if rgb:
        # expect 3 dimensions, with one of len==3 (=> C)
        left_dim = ["X", "Y"]
        for i, d in enumerate(dataset.dims):
            if image.shape[i] == 3:
                dname = "C"
            else:
                # pop() takes from the end of the list => the first non-C
                # dimension encountered is labelled "Y", the next one "X"
                dname = left_dim.pop()
            d.label = dname
        l = 2  # trick to force to only put info for X and Y
        # NOTE(review): with l forced to 2, the attach_scale calls below use
        # dims[0]/dims[1], which are X/Y only if C is the *last* dimension —
        # confirm against the callers' RGB data layout.
    else:
        # The order of the dimension is reversed (the slowest changing is last)
        l = len(dataset.dims)
        dataset.dims[l - 1].label = "X"
        dataset.dims[l - 2].label = "Y"
        # support more dimensions if available:
        if l >= 3:
            dataset.dims[l - 3].label = "Z"
        if l >= 4:
            dataset.dims[l - 4].label = "T"
        if l >= 5:
            dataset.dims[l - 5].label = "C"

    # Offset: position of the image (centre), in meters
    if model.MD_POS in image.metadata:
        pos = image.metadata[model.MD_POS]
        group["XOffset"] = pos[0]
        _h5svi_set_state(group["XOffset"], ST_REPORTED)
        group["XOffset"].attrs["UNIT"] = "m"  # our extension
        group["YOffset"] = pos[1]
        _h5svi_set_state(group["YOffset"], ST_REPORTED)
        group["YOffset"].attrs["UNIT"] = "m"  # our extension

    # Time
    # TODO:
    # Surprisingly (for such a usual type), time storage is a mess in HDF5.
    # The documentation states that you can use H5T_TIME, but it is
    # "is not supported. If H5T_TIME is used, the resulting data will be readable
    # and modifiable only on the originating computing platform; it will not be
    # portable to other platforms.". It appears many format are allowed.
    # In addition in h5py, it's indicated as "deprecated" (although it seems
    # it was added in the latest version of HDF5).
    # Moreover, the only types available are 32 and 64 bits integers as number
    # of seconds since epoch. No past, no milliseconds, no time-zone.
    # So there are other proposals like in in F5
    # (http://sciviz.cct.lsu.edu/papers/2007/F5TimeSemantics.pdf) to represent
    # time with a float, a unit and an offset.
    # KNMI uses a string like this: DD-MON-YYYY;HH:MM:SS.sss.
    # (cf http://www.knmi.nl/~beekhuis/documents/publicdocs/ir2009-01_hdftag36.pdf)
    # So, to not solve anything, we save the date as a float representing the
    # Unix time. At least it makes Huygens happy.
    if model.MD_ACQ_DATE in image.metadata:
        # For a ISO 8601 string:
        #        ad = datetime.utcfromtimestamp(image.metadata[model.MD_ACQ_DATE])
        #        adstr = ad.strftime("%Y-%m-%dT%H:%M:%S.%f")
        #        group["TOffset"] = adstr
        group["TOffset"] = image.metadata[model.MD_ACQ_DATE]
        _h5svi_set_state(group["TOffset"], ST_REPORTED)
    else:
        # No acquisition date in the metadata => use "now" as a default
        group["TOffset"] = time.time()
        _h5svi_set_state(group["TOffset"], ST_DEFAULT)
    group["TOffset"].attrs["UNIT"] = "s"  # our extension

    # Scale: physical size of one pixel, attached as HDF5 dimension scales
    if model.MD_PIXEL_SIZE in image.metadata:
        # DimensionScales are not clearly explained in the specification to
        # understand what they are supposed to represent. Surprisingly, there
        # is no official way to attach a unit.
        # Huygens seems to consider it's in m
        pxs = image.metadata[model.MD_PIXEL_SIZE]
        group["DimensionScaleX"] = pxs[0]
        group["DimensionScaleX"].attrs["UNIT"] = "m"  # our extension
        _h5svi_set_state(group["DimensionScaleX"], ST_REPORTED)
        group["DimensionScaleY"] = pxs[1]
        group["DimensionScaleY"].attrs["UNIT"] = "m"
        _h5svi_set_state(group["DimensionScaleY"], ST_REPORTED)
        # Not clear what's the relation between this name and the label
        # NOTE(review): dims.create_scale was deprecated in h5py 2.10 in
        # favour of Dataset.make_scale — confirm minimum supported h5py.
        dataset.dims.create_scale(group["DimensionScaleX"], "X")
        dataset.dims.create_scale(group["DimensionScaleY"], "Y")
        dataset.dims[l - 1].attach_scale(group["DimensionScaleX"])
        dataset.dims[l - 2].attach_scale(group["DimensionScaleY"])

    # Unknown data, but SVI needs them to take the scales into consideration
    if l >= 3:
        # Default (not measured) Z information
        group["ZOffset"] = 0.0
        _h5svi_set_state(group["ZOffset"], ST_DEFAULT)
        group["DimensionScaleZ"] = 1e-3  # m
        group["DimensionScaleZ"].attrs["UNIT"] = "m"
        dataset.dims.create_scale(group["DimensionScaleZ"], "Z")
        _h5svi_set_state(group["DimensionScaleZ"], ST_DEFAULT)
        dataset.dims[l - 3].attach_scale(group["DimensionScaleZ"])

        # Put here to please Huygens
        # Seems to be the coverslip position, ie, the lower and upper glass of
        # the sample. Not clear what's the relation with ZOffset.
        group["PrimaryGlassMediumInterfacePosition"] = 0.0  # m?
        _h5svi_set_state(group["PrimaryGlassMediumInterfacePosition"],
                         ST_DEFAULT)
        group["SecondaryGlassMediumInterfacePosition"] = 1.0  # m?
        _h5svi_set_state(group["SecondaryGlassMediumInterfacePosition"],
                         ST_DEFAULT)

    if l >= 4:
        # Default (not measured) time scale of 1 s per pixel
        group["DimensionScaleT"] = 1.0  # s
        group["DimensionScaleT"].attrs["UNIT"] = "s"
        # Not clear what's the relation between this name and the label
        dataset.dims.create_scale(group["DimensionScaleT"], "T")
        _h5svi_set_state(group["DimensionScaleT"], ST_DEFAULT)
        dataset.dims[l - 4].attach_scale(group["DimensionScaleT"])

    # Wavelength (for spectrograms)
    if (l >= 5 and set(image.metadata.keys())
            & {model.MD_WL_LIST, model.MD_WL_POLYNOMIAL}):
        try:
            # polynomial of degree = 2 => linear, so use compact notation
            if (model.MD_WL_POLYNOMIAL in image.metadata
                    and len(image.metadata[model.MD_WL_POLYNOMIAL]) == 2):
                pn = image.metadata[model.MD_WL_POLYNOMIAL]
                group["COffset"] = pn[0]
                _h5svi_set_state(group["COffset"], ST_REPORTED)
                group["DimensionScaleC"] = pn[1]  # m
            else:
                wll = spectrum.get_wavelength_per_pixel(image)
                # list or polynomial of degree > 2 => store the values of each
                # pixel index explicitly. We follow another way to express
                # scaling in HDF5.
                group["DimensionScaleC"] = wll  # m

            group["DimensionScaleC"].attrs["UNIT"] = "m"
            dataset.dims.create_scale(group["DimensionScaleC"], "C")
            _h5svi_set_state(group["DimensionScaleC"], ST_REPORTED)
            dataset.dims[l - 5].attach_scale(group["DimensionScaleC"])
        except Exception:
            # Best-effort: wavelength info is optional, never fail the save
            logging.warning("Failed to record wavelength information, "
                            "it will not be saved.")
Esempio n. 20
0
def export(filename, data):
    '''
    Write a CSV file:
        - If the given data is AR data then just dump the phi/data array
        - If the given data is spectrum data write it as series of wavelength/intensity
    filename (unicode): filename of the file to create (including path).
    data (model.DataArray): the data to export.
       Metadata is taken directly from the DA object.
    raises:
        IOError in case the spectrum does not contain wavelength metadata.
    '''
    if data.metadata.get(model.MD_ACQ_TYPE, None) == model.MD_AT_SPECTRUM:
        try:
            spectrum_range, unit = spectrum.get_wavelength_per_pixel(data), "nm"
            # Convert m -> nm for human-readable output
            spectrum_range = [s * 1e9 for s in spectrum_range]
        except Exception:
            # No wavelength metadata available: fall back to pixel indices,
            # centred so that index 0 is (roughly) the middle of the spectrum
            max_bw = data.shape[0] // 2
            min_bw = (max_bw - data.shape[0]) + 1
            spectrum_range, unit = range(min_bw, max_bw + 1), "px"
        if data.ndim == 1:
            logging.debug("Exporting spectrum data to CSV")

            if unit == "nm":
                # one (wavelength, intensity) pair per row
                spectrum_tuples = list(zip(spectrum_range, data))
                headers = ['# wavelength (nm)', 'intensity']
            else:
                logging.info("Exporting spectrum without wavelength information")
                spectrum_tuples = data.reshape(data.shape[0], 1)
                headers = ['# intensity']

            # newline='' lets the csv module control line endings itself,
            # avoiding blank interleaved rows on Windows
            with open(filename, 'w', newline='') as fd:
                csv_writer = csv.writer(fd)
                csv_writer.writerow(headers)
                csv_writer.writerows(spectrum_tuples)
        elif data.ndim == 2:
            # FIXME: For now it handles the rest of 2d data as spectrum-line
            logging.debug("Exporting spectrum-line data to CSV")

            # attach wavelength as first column
            wavelength_lin = numpy.linspace(spectrum_range[0], spectrum_range[-1], data.shape[0])
            qz_masked = numpy.append(wavelength_lin.reshape(data.shape[0], 1), data, axis=1)
            # attach distance as first row
            line_length = data.shape[1] * data.metadata[model.MD_PIXEL_SIZE][1]
            distance_lin = numpy.linspace(0, line_length, data.shape[1])
            distance_lin.shape = (1, distance_lin.shape[0])
            distance_lin = numpy.append([[0]], distance_lin, axis=1)
            qz_masked = numpy.append(distance_lin, qz_masked, axis=0)
            data = model.DataArray(qz_masked, data.metadata)

            # Data should be in the form of (Y+1, X+1), with the first row and column the
            # distance_from_origin\wavelength
            with open(filename, 'w', newline='') as fd:
                csv_writer = csv.writer(fd)
                # Set the 'header' in the 0,0 element
                # NB: '\\d' doubles the backslash so the literal text stays
                # "\distance..." without relying on an invalid escape sequence
                first_row = ['wavelength(' + unit + ')\\distance_from_origin(m)'] + [d for d in data[0, 1:]]
                csv_writer.writerow(first_row)
                # dump the array
                csv_writer.writerows(data[1:, :])
        else:
            raise IOError("Unknown type of data to be exported as CSV")
    elif data.metadata.get(model.MD_ACQ_TYPE, None) == model.MD_AT_AR:
        logging.debug("Exporting AR data to CSV")
        # Data should be in the form of (Y+1, X+1), with the first row and column the angles
        with open(filename, 'w', newline='') as fd:
            csv_writer = csv.writer(fd)
            # Set the 'header' in the 0,0 element ('\\p' keeps the literal
            # backslash without an invalid escape sequence)
            first_row = ['theta\\phi(rad)'] + [d for d in data[0, 1:]]
            csv_writer.writerow(first_row)
            # dump the array
            csv_writer.writerows(data[1:, :])