Example #1
    def __init__(self,
                 data=None,
                 coordset=None,
                 coordunits=None,
                 coordtitles=None,
                 **kwargs):

        super().__init__(data, **kwargs)

        self._parent = None

        # optionally set the coordinates, with units and titles if provided

        if isinstance(coordset, CoordSet):
            self.set_coordset(**coordset)

        else:
            if coordset is None:
                coordset = [None] * self.ndim

            if coordunits is None:
                coordunits = [None] * self.ndim

            if coordtitles is None:
                coordtitles = [None] * self.ndim

            _coordset = []
            for c, u, t in zip(coordset, coordunits, coordtitles):
                if not isinstance(c, CoordSet):
                    if isinstance(c, LinearCoord):
                        coord = LinearCoord(c)
                    else:
                        coord = Coord(c)
                    if u is not None:
                        coord.units = u
                    if t is not None:
                        coord.title = t
                else:
                    if u:  # pragma: no cover
                        warning_(
                            "units have been set for a CoordSet, but they will be ignored "
                            "(units are only defined at the coordinate level)")
                    if t:  # pragma: no cover
                        warning_(
                            "title will be ignored as they are only defined at the coordinates level"
                        )
                    coord = c

                _coordset.append(coord)

            if _coordset and set(_coordset) != {
                    Coord()
            }:  # if there are no coordinates, do nothing
                self.set_coordset(*_coordset)
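    # A hedged usage sketch (not part of the source): how this constructor is
    # typically exercised, assuming the package is importable as `scp` and
    # exposes NDDataset with the signature above:
    #
    #   import numpy as np
    #   import spectrochempy as scp
    #   ds = scp.NDDataset(np.random.rand(3, 4),
    #                      coordset=[np.linspace(0., 60., 3),
    #                                np.linspace(4000., 1000., 4)],
    #                      coordunits=["minutes", "cm^-1"],
    #                      coordtitles=["time-on-stream", "wavenumber"])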
Example #2
    def wrapper(dataset, **kwargs):

        # On which axis do we want to shift (get axis from arguments)
        axis, dim = dataset.get_axis(**kwargs, negative_axis=True)

        # output dataset inplace (by default) or not
        if not kwargs.pop("inplace", False):
            new = dataset.copy()  # copy to be sure not to modify this dataset
        else:
            new = dataset

        swapped = False
        if axis != -1:
            new.swapdims(axis, -1, inplace=True)  # must be done in place
            swapped = True

        x = new.coordset[dim]
        if hasattr(x, "_use_time_axis"):
            x._use_time_axis = True  # we need to have dimensionless or time units

        # get the last coord
        if x.unitless or x.dimensionless or x.units.dimensionality == "[time]":

            if not x.linear:
                # This method applies only to linear coordinates,
                # so we try to linearize it
                x = LinearCoord(x)

            if not x.linear:
                raise TypeError("Coordinate x is not linearisable")

            data = method(new.data, **kwargs)
            new._data = data

            # we need to resize the x coordinate array accordingly
            x._size = new._data.shape[-1]

            # update with the new td
            new.meta.td[-1] = x.size
            new.history = f"`{method.__name__}` shift performed on dimension `{dim}` with parameters: {kwargs}"

        else:
            error_(
                "zero-filling apply only to dimensions with [time] dimensionality or dimensionless coords\n"
                "The processing was thus cancelled"
            )

        # restore original data order if it was swapped
        if swapped:
            new.swapdims(axis, -1, inplace=True)  # must be done inplace

        return new
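    # Note: `method` is a free variable in `wrapper` above, so this is
    # presumably the inner function of a decorator factory, e.g. (names
    # assumed; only the closure structure is implied by the code):
    #
    #   def _process_on_last_dim(method):
    #       def wrapper(dataset, **kwargs):
    #           ...  # body as above, calling method(new.data, **kwargs)
    #       return wrapper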
Example #3
def test_linearcoord():
    coord1 = Coord([1, 2.5, 4, 5])

    coord2 = Coord(np.array([1, 2.5, 4, 5]))
    assert coord2 == coord1

    coord3 = Coord(range(10))

    coord4 = Coord(np.arange(10))
    assert coord4 == coord3

    coord5 = coord4.copy()
    coord5 += 1
    assert np.all(coord5.data == coord4.data + 1)

    assert coord5 is not None
    coord5.linear = True

    coord6 = Coord(linear=True, offset=2.0, increment=2.0, size=10)
    assert np.all(coord6.data == (coord4.data + 1.0) * 2.)

    LinearCoord(offset=2.0, increment=2.0, size=10)

    coord0 = LinearCoord.linspace(200.,
                                  300.,
                                  3,
                                  labels=['cold', 'normal', 'hot'],
                                  units="K",
                                  title='temperature')
    coord1 = LinearCoord.linspace(0.,
                                  60.,
                                  100,
                                  labels=None,
                                  units="minutes",
                                  title='time-on-stream')
    coord2 = LinearCoord.linspace(4000.,
                                  1000.,
                                  100,
                                  labels=None,
                                  units="cm^-1",
                                  title='wavenumber')

    assert coord0.size == 3
    assert coord1.size == 100
    assert coord2.size == 100

    coordc = coord0.copy()
    assert coord0 == coordc

    coordc = coord1.copy()
    assert coord1 == coordc
Example #4
def test_linearcoord_add_units_with_different_scale():
    d1 = LinearCoord.arange(3.0, units="m")
    d2 = LinearCoord.arange(3.0, units="cm")

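    # addition keeps the units of the left operand, so 1.0 m + 1.0 cm
    # -> 1.01 m, while 1.0 cm + 1.0 m -> 101.0 cm (as the asserts below show)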
    x = d1 + 1.0 * ur.cm
    assert np.around(x.data[1], 2) == 1.01

    x = d1 + d2
    assert np.around(x.data[1], 2) == 1.01
    x = d2 + d1
    assert np.around(x.data[1], 2) == 101.0
    d1 += d2
    assert np.around(d1.data[1], 2) == 1.01
    d2 += d1
    assert d2.data[1] == 102.0
Example #5
    def wrapper(cls, xinput, *args, **kwargs):

        returntype = "ndarray"
        x = xinput.copy()

        x_units = None
        if hasattr(xinput, "units"):
            x_units = xinput.units
            if isinstance(xinput, Coord):
                x = xinput.data
                returntype = "NDDataset"
            else:
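                # presumably a pint-like Quantity: keep only its magnitude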
                x = xinput.m

        # get args or their equivalents in kwargs, and convert units if needed.
        newargs = []

        for index, param in enumerate(cls.args):
            newargs.append(
                kwargs.get(param, args[index] if len(args) > index else 0))

        for index, arg in enumerate(newargs):
            # adapt units
            if cls.args[index] in ["width", "pos"]:
                # implicit units: those of x else rescale
                newargs[index] = _convert_to_units(arg, x_units)

        ampl_units = None
        if hasattr(newargs[0], "units"):
            ampl_units = newargs[0].units
            newargs[0] = newargs[0].m

        _data = func(cls, x, *newargs)

        if returntype == "NDDataset":
            res = NDDataset(_data, units=ampl_units)
            res.x = LinearCoord(xinput)
            res.name = cls.__class__.__name__.split("model")[0]
            res.title = "intensity"

        else:
            res = _data
            if ampl_units:
                res = res * ampl_units

        return res
Example #6
def test_coordset_copy(coord0, coord1):
    coord2 = LinearCoord.linspace(200.,
                                  300.,
                                  3,
                                  units="K",
                                  title='temperature')

    coordsa = CoordSet(coord0, coord1, coord2)

    coordsb = coordsa.copy()
    assert coordsa == coordsb
    assert coordsa is not coordsb
    assert coordsa(1) == coordsb(1)
    assert coordsa(1).name == coordsb(1).name

    # copy
    coords = CoordSet(coord0, coord0.copy())
    coords1 = coords[:]
    assert coords is not coords1

    import copy
    coords2 = copy.deepcopy(coords)
    assert coords == coords2
Example #7
def _read_srs(*args, **kwargs):
    dataset, filename = args
    frombytes = kwargs.get("frombytes", False)

    return_bg = kwargs.get("return_bg", False)

    if frombytes:
        # in this case, filename is actually a byte content
        fid = io.BytesIO(filename)  # pragma: no cover
    else:
        fid = open(filename, "rb")

    # determine whether the srs has been reprocessed. At pos=292 (hex: 124), a difference
    # appears between original and reprocessed series
    fid.seek(292)
    key = _fromfile(fid, dtype="uint8", count=16)[0]
    if key == 39:  # (hex: 27)
        is_reprocessed = False
    elif key == 15:  # (hex = 0F)
        is_reprocessed = True
    # if key == 72 (hex:48), could be TGA

    """ At pos=304 (hex:130) is the position of the '02' key for series. Herte we don't use it.
    Instead, we use the following sequence :
    b'\x02\x00\x00\x00\x18\x00\x00\x00\x00\x00\x48\x43\x00\x50\x43\x47'
    which appears 3 times in rapid-scan srs. They are used to assert the srs file is rapid_scan
    and to locate headers and data:
    - The 1st one is located 152 bytes after the series header position
    - The 2nd one is located 152 bytes before the background header position and
       56 bytes before either the background data / or the background title and infos
       followed by the background data
    - The 3rd one is located 64 bytes before the series data (spectre/ifg names and
    intensities"""

    sub = b"\x02\x00\x00\x00\x18\x00\x00\x00\x00\x00\x48\x43\x00\x50\x43\x47"

    # find the 3 starting indexes of sub. we will use the 1st (-> series info),
    # the 2nd (-> background) and the 3rd (-> data)
    fid.seek(0)
    bytestring = fid.read()
    start = 0
    index = []
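    # bytes.find returns -1 once no further match exists; the loop below also
    # appends that final -1 before stopping, so it is dropped with index[:-1]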
    while start != -1:
        i = bytestring.find(sub, start + 1)
        index.append(i)
        start = i
    index = np.array(index[:-1])

    if len(index) != 3:
        raise NotImplementedError("Only implemented for rapidscan")

    index += [-152, -152, 60]

    # read series data, except if the user asks for the background
    if not return_bg:
        info = _read_header(fid, index[0])
        # container for names and data
        names = []
        data = np.zeros((info["ny"], info["nx"]))

        # now read the spectra/interferogram names and data
        # the first one....
        pos = index[2]
        names.append(_readbtext(fid, pos, 256))
        pos += 84
        fid.seek(pos)
        data[0, :] = _fromfile(fid, dtype="float32", count=info["nx"])[:]
        pos += info["nx"] * 4
        # ... and the remaining ones:
        for i in np.arange(info["ny"])[1:]:
            pos += 16
            names.append(_readbtext(fid, pos, 256))
            pos += 84
            fid.seek(pos)
            data[i, :] = _fromfile(fid, dtype="float32", count=info["nx"])[:]
            pos += info["nx"] * 4

        # now get series history
        if not is_reprocessed:
            history = info["history"]
        else:
            # In reprocessed series the updated "DATA PROCESSING HISTORY" is located right after
            # the following 16 byte sequence:
            sub = b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
            pos = bytestring.find(sub) + 16
            history = _readbtext(fid, pos, None)

    # read the background if the user asked for it.
    if return_bg:

        # First get background info
        info = _read_header(fid, index[1])

        if "background_name" not in info.keys():
            # it is a short header
            fid.seek(index[1] + 208)
            data = _fromfile(fid, dtype="float32", count=info["nx"])
        else:
            # longer header, in such case the header indicates a spectrum
            # but the data are those of an ifg... For now need more examples
            return None

    # Create NDDataset Object for the series
    if not return_bg:
        dataset = NDDataset(data)
    else:
        dataset = NDDataset(np.expand_dims(data, axis=0))

    # in case part of the spectra/ifg has been blanked:
    dataset.mask = np.isnan(dataset.data)

    dataset.units = info["units"]
    dataset.title = info["title"]
    dataset.origin = "omnic"

    # now add coordinates
    spacing = (info["lastx"] - info["firstx"]) / (info["nx"] - 1)
    _x = LinearCoord(
        offset=info["firstx"],
        increment=spacing,
        size=info["nx"],
        title=info["xtitle"],
        units=info["xunits"],
    )

    # specific infos for series data
    if not return_bg:
        dataset.name = info["name"]
        _y = Coord(
            np.around(np.linspace(info["firsty"], info["lasty"], info["ny"]), 3),
            title="Time",
            units="minute",
            labels=names,
        )

    else:
        _y = Coord()

    dataset.set_coordset(y=_y, x=_x)

    # Set origin, description and history
    dataset.origin = "omnic"
    dataset.description = kwargs.get("description", "Dataset from omnic srs file.")

    if "history" in locals():
        dataset.history.append(
            "Omnic 'DATA PROCESSING HISTORY' :\n"
            "--------------------------------\n" + history
        )
    dataset.history.append(
        str(datetime.now(timezone.utc)) + ": imported from srs file " + str(filename)
    )

    dataset.meta.laser_frequency = info["reference_frequency"] * ur("cm^-1")
    dataset.meta.collection_length = info["collection_length"] * ur("s")

    if dataset.x.units is None and dataset.x.title == "data points":
        # interferogram
        dataset.meta.interferogram = True
        dataset.meta.td = list(dataset.shape)
        dataset.x._zpd = int(np.argmax(dataset)[-1])  # zero path difference
        dataset.x.set_laser_frequency()
        dataset.x._use_time_axis = (
            False  # True to have time, else it will be optical path difference
        )

        # uncomment below to load the last datafield, which has the same dimension as the
        # time axis. Its function is not known. Related to Gram-Schmidt?

        # pos = _nextline(pos)
        # found = False
        # while not found:
        #     pos += 16
        #     f.seek(pos)
        #     key = _fromfile(f, dtype='uint8', count=1)
        #     if key == 1:
        #         pos += 4
        #         f.seek(pos)
        #         X = _fromfile(f, dtype='float32', count=info['ny'])
        #         found = True
        #
        # X = NDDataset(X)
        # _x = Coord(np.around(np.linspace(0, info['ny']-1, info['ny']), 0),
        #            title='time',
        #            units='minutes')
        # X.set_coordset(x=_x)
        # X.name = '?'
        # X.title = '?'
        # X.description = 'unknown'
        # X.history = str(datetime.now(timezone.utc)) + ':imported from srs

    fid.close()

    return dataset
Example #8
def _read_spa(*args, **kwargs):
    dataset, filename = args
    content = kwargs.get("content", False)

    if content:
        fid = io.BytesIO(content)
    else:
        fid = open(filename, "rb")

    return_ifg = kwargs.get("return_ifg", None)

    # Read name:
    # The name starts at position hex 1e = decimal 30. Its max length
    # is 256 bytes. It is the original filename under which the spectrum has
    # been saved: it won't match with the actual filename if a subsequent
    # renaming has been done in the OS.
    spa_name = _readbtext(fid, 30, 256)

    # The acquisition date (GMT) is at hex 128 = decimal 296.
    # Seconds since 31/12/1899, 00:00
    fid.seek(296)
    timestamp = _fromfile(fid, dtype="uint32", count=1)
    acqdate = datetime(1899, 12, 31, 0, 0, tzinfo=timezone.utc) + timedelta(
        seconds=int(timestamp)
    )
    acquisitiondate = acqdate

    # Transform back to timestamp for storage in the Coord object
    # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime object
    timestamp = acqdate.timestamp()

    # From hex 130 = decimal 304, the spectrum is described
    # by a block of lines starting with "key values",
    # for instance hex[02 6a 6b 69 1b 03 82] -> dec[02 106  107 105 27 03 130]
    # Each of these lines provides positions of data and metadata in the file:
    #
    #     key: hex 02, dec  02: position of spectral header (=> nx,
    #                                 firstx, lastx, nscans, nbkgscans)
    #     key: hex 03, dec  03: intensity position
    #     key: hex 04, dec  04: user text position (custom info, can be present
    #                           several times. The text length is five bytes later)
    #     key: hex 1B, dec  27: position of History text, The text length
    #                           is five bytes later
    #     key: hex 53, dec  83: probably not a position, present when 'Retrieved from library'
    #     key: hex 64, dec 100: ?
    #     key: hex 66  dec 102: sample interferogram
    #     key: hex 67  dec 103: background interferogram
    #     key: hex 69, dec 105: ?
    #     key: hex 6a, dec 106: ?
    #     key: hex 80, dec 128: ?
    #     key: hex 82, dec 130: position of 'Experiment Information', The text length
    #                           is five bytes later. The block gives Experiment filename (at +10)
    #                           Experiment title (+90), custom text (+254), accessory name (+413)
    #     key: hex 92, dec 146: position of 'custom infos', The text length
    #                           is five bytes later.
    #
    # The line preceding the block start with '01' or '0A'
    # The lines after the block generally start with '00', except in few cases where
    # they start by '01'. In such cases, the '53' key is also present
    # (before the '1B').
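    # Each "key value" line is 16 bytes long, hence the `pos += 16` stride in
    # the scan loop below.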

    # scan "key values"
    pos = 304
    spa_comments = []  # several custom comments can be present
    while "continue":
        fid.seek(pos)
        key = _fromfile(fid, dtype="uint8", count=1)

        # print(key, end=' ; ')

        if key == 2:
            # read the position of the header
            fid.seek(pos + 2)
            pos_header = _fromfile(fid, dtype="uint32", count=1)
            info = _read_header(fid, pos_header)

        elif key == 3 and return_ifg is None:
            intensities = _getintensities(fid, pos)

        elif key == 4:
            fid.seek(pos + 2)
            comments_pos = _fromfile(fid, "uint32", 1)
            fid.seek(pos + 6)
            comments_len = _fromfile(fid, "uint32", 1)
            fid.seek(comments_pos)
            spa_comments.append(fid.read(comments_len).decode("latin-1", "replace"))

        elif key == 27:
            fid.seek(pos + 2)
            history_pos = _fromfile(fid, "uint32", 1)
            fid.seek(pos + 6)
            history_len = _fromfile(fid, "uint32", 1)
            spa_history = _readbtext(fid, history_pos, history_len)

        elif key == 102 and return_ifg == "sample":
            s_ifg_intensities = _getintensities(fid, pos)

        elif key == 103 and return_ifg == "background":
            b_ifg_intensities = _getintensities(fid, pos)

        elif key == 0 or key == 1:
            break

        pos += 16

    fid.close()

    if (return_ifg == "sample" and "s_ifg_intensities" not in locals()) or (
        return_ifg == "background" and "b_ifg_intensities" not in locals()
    ):
        info_("No interferogram found, read_spa returns None")
        return None
    elif return_ifg == "sample":
        intensities = s_ifg_intensities
    elif return_ifg == "background":
        intensities = b_ifg_intensities
    # load intensities into the NDDataset
    dataset.data = np.array(intensities[np.newaxis], dtype="float32")

    if return_ifg == "background":
        title = "sample acquisition timestamp (GMT)"  # bckg acquisition date is not known for the moment...
    else:
        title = "acquisition timestamp (GMT)"  # no ambiguity here

    _y = Coord(
        [timestamp],
        title=title,
        units="s",
        labels=([acquisitiondate], [filename]),
    )

    # useful when a part of the spectrum/ifg has been blanked:
    dataset.mask = np.isnan(dataset.data)

    if return_ifg is None:
        default_description = f"# Omnic name: {spa_name}\n# Filename: {filename.name}"
        dataset.units = info["units"]
        dataset.title = info["title"]

        # now add coordinates
        nx = info["nx"]
        firstx = info["firstx"]
        lastx = info["lastx"]
        xunit = info["xunits"]
        xtitle = info["xtitle"]

        spacing = (lastx - firstx) / (nx - 1)

        _x = LinearCoord(
            offset=firstx, increment=spacing, size=nx, title=xtitle, units=xunit
        )

    else:  # interferogram
        if return_ifg == "sample":
            default_description = (
                f"# Omnic name: {spa_name} : sample IFG\n # Filename: {filename.name}"
            )
        else:
            default_description = f"# Omnic name: {spa_name} : background IFG\n # Filename: {filename.name}"
        spa_name += ": Sample IFG"
        dataset.units = "V"
        dataset.title = "detector signal"
        _x = LinearCoord(
            offset=0,
            increment=1,
            size=len(intensities),
            title="data points",
            units=None,
        )

    dataset.set_coordset(y=_y, x=_x)
    dataset.name = spa_name  # to be consistent with omnic behaviour
    dataset.filename = str(filename)

    # Set origin, description, history, date
    # Omnic spa files don't have a specific "origin" field stating the origin of the data

    dataset.description = kwargs.get("description", default_description) + "\n"
    if len(spa_comments) > 1:
        dataset.description += "# Comments from Omnic:\n"
        for comment in spa_comments:
            dataset.description += comment + "\n---------------------\n"

    dataset.history = str(datetime.now(timezone.utc)) + ":imported from spa file(s)"

    if "spa_history" in locals():
        if len("spa_history".strip(" ")) > 0:
            dataset.history = (
                "Data processing history from Omnic :\n------------------------------------\n"
                + spa_history
            )

    dataset._date = datetime.now(timezone.utc)

    dataset.meta.collection_length = info["collection_length"] / 100 * ur("s")
    dataset.meta.optical_velocity = info["optical_velocity"]
    dataset.meta.laser_frequency = info["reference_frequency"] * ur("cm^-1")

    if dataset.x.units is None and dataset.x.title == "data points":
        # interferogram
        dataset.meta.interferogram = True
        dataset.meta.td = list(dataset.shape)
        dataset.x._zpd = int(np.argmax(dataset)[-1])
        dataset.x.set_laser_frequency()
        dataset.x._use_time_axis = (
            False  # True to have time, else it will be optical path difference
        )

    return dataset
Example #9
def _read_spg(*args, **kwargs):
    # read spg file

    dataset, filename = args
    sortbydate = kwargs.pop("sortbydate", True)
    content = kwargs.get("content", False)

    if content:
        fid = io.BytesIO(content)
    else:
        fid = open(filename, "rb")

    # Read name:
    # The name starts at position hex 1e = decimal 30. Its max length
    # is 256 bytes. It is the original filename under which the group has been saved: it
    # won't match with the actual filename if a subsequent renaming has been done in the OS.

    spg_title = _readbtext(fid, 30, 256)

    # Count the number of spectra
    # From hex 130 = decimal 304, individual spectra are described
    # by blocks of lines starting with "key values",
    # for instance hex[02 6a 6b 69 1b 03 82] -> dec[02 106  107 105 27 03 130]
    # Each of these lines provides positions of data and metadata in the file:
    #
    #     key: hex 02, dec  02: position of spectral header (=> nx, firstx,
    #     lastx, nscans, nbkgscans)
    #     key: hex 03, dec  03: intensity position
    #     key: hex 04, dec  04: user text position
    #     key: hex 1B, dec  27: position of History text
    #     key: hex 64, dec 100: ?
    #     key: hex 66  dec 102: sample interferogram
    #     key: hex 67  dec 103: background interferogram
    #     key: hex 69, dec 105: ?
    #     key: hex 6a, dec 106: ?
    #     key: hex 6b, dec 107: position of spectrum title, the acquisition
    #     date follows at +256(dec)
    #     key: hex 80, dec 128: ?
    #     key: hex 82, dec 130: rotation angle ?
    #
    # the number of lines per block may change from file to file, but the total
    # number of lines is given at pos=294 (hex: 126), hence allowing the
    # number of spectra to be counted:

    # read total number of lines
    fid.seek(294)
    nlines = _fromfile(fid, "uint16", count=1)

    # read "key values"
    pos = 304
    keys = np.zeros(nlines)
    for i in range(nlines):
        fid.seek(pos)
        keys[i] = _fromfile(fid, dtype="uint8", count=1)
        pos = pos + 16

    # the number of occurrences of the key '02' is the number of spectra
    nspec = np.count_nonzero((keys == 2))

    if nspec == 0:  # pragma: no cover
        raise IOError(
            "Error: File format not recognized - information markers not found"
        )

    # container to hold values
    nx, firstx, lastx = (
        np.zeros(nspec, "int"),
        np.zeros(nspec, "float"),
        np.zeros(nspec, "float"),
    )
    xunits = []
    xtitles = []
    units = []
    titles = []

    # Extracts positions of '02' keys
    key_is_02 = keys == 2  # ex: [T F F F F T F (...) F T ....]'
    indices02 = np.nonzero(key_is_02)  # ex: [1 9 ...]
    position02 = (
        304 * np.ones(len(indices02[0]), dtype="int") + 16 * indices02[0]
    )  # ex: [304 432 ...]

    for i in range(nspec):
        # read the position of the header
        fid.seek(position02[i] + 2)
        pos_header = _fromfile(fid, dtype="uint32", count=1)
        # get infos
        info = _read_header(fid, pos_header)
        nx[i] = info["nx"]
        firstx[i] = info["firstx"]
        lastx[i] = info["lastx"]
        xunits.append(info["xunits"])
        xtitles.append(info["xtitle"])
        units.append(info["units"])
        titles.append(info["title"])

    # check the consistency of xaxis and data units
    if np.ptp(nx) != 0:  # pragma: no cover
        raise ValueError(
            "Error: Inconsistent data set - "
            "the number of wavenumbers per spectrum should be identical"
        )
    elif np.ptp(firstx) != 0:  # pragma: no cover
        raise ValueError(
            "Error: Inconsistent data set - the x axis should start at the same value"
        )
    elif np.ptp(lastx) != 0:  # pragma: no cover
        raise ValueError(
            "Error: Inconsistent data set - the x axis should end at the same value"
        )
    elif len(set(xunits)) != 1:  # pragma: no cover
        raise ValueError(
            "Error: Inconsistent data set - x axis units should be identical"
        )
    elif len(set(units)) != 1:  # pragma: no cover
        raise ValueError(
            "Error: Inconsistent data set - data units should be identical"
        )
    data = np.ndarray((nspec, nx[0]), dtype="float32")

    # Now the intensity data

    # Extracts positions of '03' keys
    key_is_03 = keys == 3
    indices03 = np.nonzero(key_is_03)
    position03 = 304 * np.ones(len(indices03[0]), dtype="int") + 16 * indices03[0]

    # Read number of spectral intensities
    for i in range(nspec):
        data[i, :] = _getintensities(fid, position03[i])

    # Get spectra titles & acquisition dates:
    # container to hold values
    spectitles, acquisitiondates, timestamps = [], [], []

    # Extract positions of '6B' keys (spectra titles & acquisition dates)
    key_is_6B = keys == 107
    indices6B = np.nonzero(key_is_6B)
    position6B = 304 * np.ones(len(indices6B[0]), dtype="int") + 16 * indices6B[0]

    # Read spectra titles and acquisition date
    for i in range(nspec):
        # determine the position of the information
        fid.seek(position6B[i] + 2)  # go to line and skip 2 bytes
        spa_title_pos = _fromfile(fid, "uint32", 1)

        # read filename
        spa_title = _readbtext(fid, spa_title_pos, 256)
        spectitles.append(spa_title)

        # and the acquisition date
        fid.seek(spa_title_pos + 256)
        timestamp = _fromfile(fid, dtype="uint32", count=1)
        # since 31/12/1899, 00:00
        acqdate = datetime(1899, 12, 31, 0, 0, tzinfo=timezone.utc) + timedelta(
            seconds=int(timestamp)
        )
        acquisitiondates.append(acqdate)
        timestamp = acqdate.timestamp()
        # Transform back to timestamp for storage in the Coord object
        # use datetime.fromtimestamp(d, timezone.utc))
        # to transform back to datetime object

        timestamps.append(timestamp)

        # Not used at present
        # -------------------
        # extract positions of '1B' codes (history text), sometimes absent,
        # e.g. peakresolve)
        #  key_is_1B = (keys == 27)
        #  indices1B =  # np.nonzero(key_is_1B)
        #  position1B = 304 * np.ones(len(indices1B[0]), dtype='int') + 16 * indices6B[0]
        #  if len(position1B) != 0:  # read history texts
        #     for j in range(nspec):  determine the position of information
        #        f.seek(position1B[j] + 2)  #
        #        history_pos = _fromfile(f,  'uint32', 1)
        #        history =  _readbtext(f, history_pos[0])
        #        allhistories.append(history)

    fid.close()

    # Create Dataset Object of spectral content
    dataset.data = data
    dataset.units = units[0]
    dataset.title = titles[0]
    dataset.name = filename.stem
    dataset.filename = filename

    # now add coordinates
    # _x = Coord(np.around(np.linspace(firstx[0], lastx[0], nx[0]), 3),
    #           title=xtitles[0], units=xunits[0])
    spacing = (lastx[0] - firstx[0]) / int(nx[0] - 1)
    _x = LinearCoord(
        offset=firstx[0],
        increment=spacing,
        size=int(nx[0]),
        title=xtitles[0],
        units=xunits[0],
    )

    _y = Coord(
        timestamps,
        title="acquisition timestamp (GMT)",
        units="s",
        labels=(acquisitiondates, spectitles),
    )

    dataset.set_coordset(y=_y, x=_x)

    # Set description, date and history
    # Omnic spg files don't have a specific "origin" field stating the origin of the data
    dataset.description = kwargs.get(
        "description", f"Omnic title: {spg_title}\nOmnic " f"filename: {filename}"
    )

    dataset._date = datetime.now(timezone.utc)

    dataset.history = str(dataset.date) + ":imported from spg file {} ; ".format(
        filename
    )
    if sortbydate:
        dataset.sort(dim="y", inplace=True)
        dataset.history = str(dataset.date) + ":sorted by date"

    # debug_("end of reading")

    return dataset
Example #10
def _read_txt(*args, **kwargs):
    # read Labspec *txt files or series

    dataset, filename = args
    content = kwargs.get('content', False)

    if content:
        fid = io.StringIO(content)
        lines = fid.readlines()

    else:
        fid = open(filename, 'r', encoding='utf-8')
        try:
            lines = fid.readlines()
        except UnicodeDecodeError:
            fid.close()
            fid = open(filename, 'r', encoding='latin-1')
            lines = fid.readlines()
        fid.close()

    # Metadata
    meta = Meta()

    i = 0
    while lines[i].startswith('#'):
        key, val = lines[i].split('=', maxsplit=1)
        key = key[1:]
        if key in meta.keys():
            key = f'{key} {i}'
        meta[key] = val.strip()
        i += 1

    # read spec
    rawdata = np.genfromtxt(lines[i:], delimiter='\t')

    # populate the dataset
    if rawdata.shape[1] == 2:
        data = rawdata[:, 1][np.newaxis]
        _x = Coord(rawdata[:, 0], title='Raman shift', units='1/cm')
        _y = Coord(None, title='Time', units='s')
        date_acq, _y = _transf_meta(_y, meta)

    else:
        data = rawdata[1:, 1:]
        _x = Coord(rawdata[0, 1:], title='Raman shift', units='1/cm')
        _y = Coord(rawdata[1:, 0], title='Time', units='s')
        date_acq, _y = _transf_meta(_y, meta)

    # try to transform to linear coord
    _x.linear = True

    # if success linear should still be True
    if _x.linear:
        _x = LinearCoord(_x)

    # set dataset metadata
    dataset.data = data
    dataset.set_coordset(y=_y, x=_x)
    dataset.title = 'Raman intensity'
    dataset.units = 'absorbance'
    dataset.name = filename.stem
    dataset.meta = meta

    # date_acq is Acquisition date at start (first moment of acquisition)
    dataset.description = 'Spectrum acquisition : ' + str(date_acq)

    # Set the NDDataset date
    dataset._date = datetime.datetime.now(datetime.timezone.utc)
    dataset._modified = dataset.date

    # Set origin, description and history
    dataset.history = f'{dataset.date}:imported from LabSpec6 text file {filename}'

    return dataset
Example #11
def test_linearcoord():
    coord1 = Coord([1, 2.5, 4, 5])

    coord2 = Coord(np.array([1, 2.5, 4, 5]))
    assert coord2 == coord1

    coord3 = Coord(range(10))

    coord4 = Coord(np.arange(10))
    assert coord4 == coord3

    coord5 = coord4.copy()
    coord5 += 1
    assert np.all(coord5.data == coord4.data + 1)

    assert coord5 is not None
    coord5.linear = True

    coord6 = Coord(linear=True, offset=2.0, increment=2.0, size=10)
    assert np.all(coord6.data == (coord4.data + 1.0) * 2.0)

    LinearCoord(offset=2.0, increment=2.0, size=10)

    coord0 = LinearCoord.linspace(
        200.0,
        300.0,
        3,
        labels=["cold", "normal", "hot"],
        units="K",
        title="temperature",
    )
    coord1 = LinearCoord.linspace(0.0,
                                  60.0,
                                  100,
                                  labels=None,
                                  units="minutes",
                                  title="time-on-stream")
    coord2 = LinearCoord.linspace(4000.0,
                                  1000.0,
                                  100,
                                  labels=None,
                                  units="cm^-1",
                                  title="wavenumber")

    assert coord0.size == 3
    assert coord1.size == 100
    assert coord2.size == 100

    coordc = coord0.copy()
    assert coord0 == coordc

    coordc = coord1.copy()
    assert coord1 == coordc

    assert_approx_equal(coord1.spacing.m, 0.606060606)

    assert coord1.author is None
    assert not coord1.history

    assert not coord1.descendant
    assert coord2.descendant

    assert coord1.is_1d

    assert coord0.transpose() == coord0
Example #12
        def item_to_attr(obj, dic):

            for key, val in dic.items():

                try:
                    if "readonly" in dic.keys() and key in [
                            "readonly", "name"
                    ]:
                        # case of the meta and preferences
                        pass

                    elif hasattr(obj, f"_{key}"):
                        # use the hidden attribute if it exists
                        key = f"_{key}"

                    if val is None:
                        pass

                    elif key in ["_meta", "_ranges", "_preferences"]:
                        setattr(obj, key, item_to_attr(getattr(obj, key), val))

                    elif key in ["_coordset"]:
                        _coords = []
                        for v in val["coords"]:
                            if "data" in v:
                                # coords
                                _coords.append(item_to_attr(Coord(), v))
                            elif "coords" in v:
                                # likely a coordset (multicoordinates)
                                if v["is_same_dim"]:
                                    _mcoords = []
                                    for mv in v["coords"]:
                                        if "data" in mv:
                                            # coords
                                            _mcoords.append(
                                                item_to_attr(Coord(), mv))
                                        else:
                                            # likely a linearcoord
                                            _mcoords.append(
                                                item_to_attr(
                                                    LinearCoord(), mv))
                                    cs = CoordSet(*_mcoords[::-1],
                                                  name=v["name"])
                                    _coords.append(cs)
                                else:
                                    raise ValueError(
                                        "Invalid : not a multicoordinate")
                            else:
                                # likely a linearcoord
                                _coords.append(item_to_attr(LinearCoord(), v))

                        coords = dict((c.name, c) for c in _coords)
                        obj.set_coordset(coords)
                        obj._name = val["name"]
                        obj._references = val["references"]

                    elif key in ["_datasets"]:
                        # datasets = [item_to_attr(NDDataset(name=k),
                        # v) for k, v in val.items()]
                        datasets = [
                            item_to_attr(NDDataset(), js) for js in val
                        ]
                        obj.datasets = datasets

                    elif key in ["_projects"]:
                        projects = [item_to_attr(Project(), js) for js in val]
                        obj.projects = projects

                    elif key in ["_scripts"]:
                        scripts = [item_to_attr(Script(), js) for js in val]
                        obj.scripts = scripts

                    elif key in ["_parent"]:
                        # automatically set
                        pass

                    else:
                        if isinstance(val, TYPE_BOOL) and key == "_mask":
                            val = np.bool_(val)
                        if isinstance(obj, NDDataset) and key == "_filename":
                            obj.filename = val  # hack: for some reason the
                            # filename attribute is not otherwise found
                        else:
                            setattr(obj, key, val)

                except Exception as e:
                    raise TypeError(f"for {key} {e}")

            return obj
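        # The serialized dicts consumed above presumably have shapes like
        # (assumed, inferred from the branching logic):
        #   {"data": [...], "units": ..., ...}                    -> Coord
        #   {"coords": [...], "is_same_dim": True, "name": "x"}   -> CoordSet
        #   an entry under "coords" without a "data" field        -> LinearCoord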
Example #13
def _read_spc(*args, **kwargs):
    dataset, filename = args
    content = kwargs.get("content", False)

    if content:
        fid = io.BytesIO(content)
    else:
        fid = open(filename, "rb")
        content = fid.read()

    # extract version
    _, Fversn = struct.unpack("cc".encode("utf8"), content[:2])

    # check spc version
    if Fversn == b"\x4b":
        endian = "little"
        head_format = "<cccciddicccci9s9sh32s130s30siicchf48sfifc187s"
        logstc_format = "<iiiiic"
        float32_dtype = "<f4"
        int16_dtype = "<i2"
        int32_dtype = "<i4"
    elif Fversn == b"\x4c":
        endian = "big"
        head_format = ">cccciddicccci9s9sh32s130s30siicchf48sfifc187s"
        logstc_format = ">iiiiic"
        float32_dtype = ">f4"
        int16_dtype = ">i2"
        int32_dtype = ">i4"
    else:
        raise NotImplementedError(
            f"The version {Fversn} is not yet supported. "
            f"Currently supported versions are b'\x4b' and b'\x4c'.")

    # extract the header (see: Galactic Universal Data Format Specification 9/4/97)
    # from SPC.H Header File:
    # typedef struct
    # {
    # BYTE ftflgs; /* Flag bits defined below */
    # BYTE fversn; /* 0x4B=> new LSB 1st, 0x4C=> new MSB 1st, 0x4D=> old format */
    # BYTE fexper; /* Instrument technique code (see below) */
    # char fexp; /* Fraction scaling exponent integer (80h=>float) */
    # DWORD fnpts; /* Integer number of points (or TXYXYS directory position) */
    # double ffirst; /* Floating X coordinate of first point */
    # double flast; /* Floating X coordinate of last point */
    # DWORD fnsub; /* Integer number of subfiles (1 if not TMULTI) */
    # BYTE fxtype; /* Type of X axis units (see definitions below) */
    # BYTE fytype; /* Type of Y axis units (see definitions below) */
    # BYTE fztype; /* Type of Z axis units (see definitions below) */
    # BYTE fpost; /* Posting disposition (see GRAMSDDE.H) */
    # DWORD fdate; /* Date/Time LSB: min=6b,hour=5b,day=5b,month=4b,year=12b */
    # char fres[9]; /* Resolution description text (null terminated) */
    # char fsource[9]; /* Source instrument description text (null terminated) */
    # WORD fpeakpt; /* Peak point number for interferograms (0=not known) */
    # float fspare[8]; /* Used for Array Basic storage */
    # char fcmnt[130]; /* Null terminated comment ASCII text string */
    # char fcatxt[30]; /* X,Y,Z axis label strings if ftflgs=TALABS */
    # DWORD flogoff; /* File offset to log block or 0 (see above) */
    # DWORD fmods; /* File Modification Flags (see below: 1=A,2=B,4=C,8=D..) */
    # BYTE fprocs; /* Processing code (see GRAMSDDE.H) */
    # BYTE flevel; /* Calibration level plus one (1 = not calibration data) */
    # WORD fsampin; /* Sub-method sample injection number (1 = first or only ) */
    # float ffactor; /* Floating data multiplier concentration factor (IEEE-32) */
    # char fmethod[48]; /* Method/program/data filename w/extensions comma list */
    # float fzinc; /* Z subfile increment (0 = use 1st subnext-subfirst) */
    # DWORD fwplanes; /* Number of planes for 4D with W dimension (0=normal) */
    # float fwinc; /* W plane increment (only if fwplanes is not 0) */
    # BYTE fwtype; /* Type of W axis units (see definitions below) */
    # char freserv[187]; /* Reserved (must be set to zero) */
    # } SPCHDR;

    (
        Ftflgs,
        Fversn,
        Fexper,
        Fexp,
        Fnpts,
        Ffirst,
        Flast,
        Fnsub,
        Fxtype,
        Fytype,
        Fztype,
        Fpost,
        Fdate,
        Fres,
        Fsource,
        Fpeakpt,
        Fspare,
        Fcmnt,
        Fcatxt,
        Flogoff,
        Fmods,
        Fprocs,
        Flevel,
        Fsampin,
        Ffactor,
        Fmethod,
        Fzinc,
        Fwplanes,
        Fwinc,
        Fwtype,
        Freserv,
    ) = struct.unpack(head_format.encode("utf8"), content[:512])

    # check compatibility with current implementation
    if Fnsub > 1:
        raise NotImplementedError(
            "spc reader not implemented yet for multifiles. If you need it, please "
            "submit a feature request on spectrochempy repository :-)")

    # extract bit flags
    tsprec, tcgram, tmulti, trandm, tordrd, talabs, txyxys, txvals = [
        x == "1" for x in reversed(list("{0:08b}".format(ord(Ftflgs))))
    ]

    #  Flag      Value   Description
    # TSPREC     0x01h   Y data blocks are 16 bit integer (only if fexp is NOT 0x80h)
    # TCGRAM     0x02h   Enables fexper in older software (not used)
    # TMULTI     0x04h   Multifile data format (more than one subfile)
    # TRANDM     0x08h   If TMULTI and TRANDM then Z values in SUBHDR structures are in random order (not used)
    # TORDRD     0x10h   If TMULTI and TORDRD then Z values are in ascending or descending ordered but not evenly spaced.
    #                    Z values read from individual SUBHDR structures.
    # TALABS     0x20h   Axis label text stored in fcatxt separated by nulls. Ignore fxtype, fytype, fztype corresponding
    #                    to non-null text in fcatxt.
    # TXYXYS     0x40h   Each subfile has unique X array; can only be used if TXVALS is also used. Used exclusively
    #                     to flag as MS data for drawing as “sticks” rather than connected lines.
    # TXVALS     0x80h   Non-evenly spaced X data. File has X value array preceding Y data block(s).
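    # e.g. Ftflgs == b"\x84" would set only tmulti (0x04) and txvals (0x80)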

    techniques = [
        "General SPC",
        "Gas Chromatogram",
        "General Chromatogram",
        "HPLC Chromatogram",
        "FT-IR, FT-NIR, FT-Raman Spectrum",
        "NIR Spectrum",
        "UV-VIS Spectrum",
        None,
        "X-ray Diffraction Spectrum",
        "Mass Spectrum ",
        "NMR Spectrum or FID",
        "Raman Spectrum",
        "Fluorescence Spectrum",
        "Atomic Spectrum",
        "Chromatography Diode Array Spectra",
    ]

    technique = techniques[int.from_bytes(Fexper, endian)]

    if talabs:
        warn(
            "The SPC file has custom Unit Labels, but spc_reader does not yet take them into account "
            "and will use defaults. "
            "If needed let us know and submit a feature request :) ")

    x_or_z_title = [
        "axis title",
        "Wavenbumbers",
        "Wavelength",
        "Wavelength",
        "Time",
        "Time",
        "Frequency",
        "Frequency",
        "Frequency",
        "m/z",
        "Chemical shift",
        "Time",
        "Time",
        "Raman shift",
        "Energy",
        "text_label",
        "diode number",
        "Channel",
        "2 theta",
        "Temperature",
        "Temperature",
        "Temperature",
        "Data Points",
        "Time",
        "Time",
        "Time",
        "Frequency",
        "Wavelength",
        "Wavelength",
        "Wavelength",
        "Time",
    ]

    x_or_z_unit = [
        None,
        "cm^-1",
        "um",
        "nm",
        "s",
        "min",
        "Hz",
        "kHz",
        "MHz",
        "g/(mol * e)",
        "ppm",
        "days",
        "years",
        "cm^-1",
        "eV",
        None,
        None,
        None,
        "degree",
        "fahrenheit",
        "celsius",
        "kelvin",
        None,
        "ms",
        "us",
        "ns",
        "GHz",
        "cm",
        "m",
        "mm",
        "hour",
    ]

    ixtype = int.from_bytes(Fxtype, endian)
    if ixtype != 255:
        x_unit = x_or_z_unit[ixtype]
        x_title = x_or_z_title[ixtype]
    else:
        x_unit = None
        x_title = "Double interferogram"

    # if Fnsub > 1:
    #     iztype = int.from_bytes(Fztype, endian)
    #     if iztype != 255:
    #         z_unit = x_or_z_unit[iztype]
    #         z_title = x_or_z_title[iztype]
    #     else:
    #         z_unit = None
    #         z_title = "Double interferogram"

    y_title = [
        "Arbitrary Intensity",
        "Interferogram",
        "Absorbance",
        "Kubelka-Munk",
        "Counts",
        "Voltage",
        "Angle",
        "Intensity",
        "Length",
        "Voltage",
        "Log(1/R)",
        "Transmittance",
        "Intensity",
        "Relative Intensity",
        "Energy",
        None,
        "Decibel",
        None,
        None,
        "Temperature",
        "Temperature",
        "Temperature",
        "Index of Refraction [N]",
        "Extinction Coeff. [K]",
        "Real",
        "Imaginary",
        "Complex",
    ]

    y_unit = [
        None,
        None,
        "absorbance",
        "Kubelka_Munk",
        None,
        "Volt",
        "degree",
        "mA",
        "mm",
        "mV",
        None,
        "percent",
        None,
        None,
        None,
        None,
        "dB",
        None,
        None,
        "fahrenheit",
        "celsius",
        "kelvin",
        None,
        None,
        None,
        None,
        None,
    ]

    iytype = int.from_bytes(Fytype, endian)
    if iytype < 128:
        y_unit = y_unit[iytype]
        y_title = y_title[iytype]

    elif iytype == 128:
        y_unit = None
        y_title = "Transmission"

    elif iytype == 129:
        y_unit = None
        y_title = "Reflectance"

    elif iytype == 130:
        y_unit = None
        y_title = "Arbitrary or Single Beam with Valley Peaks"

    elif iytype == 131:
        y_unit = None
        y_title = "Emission"

    else:
        warn(
            "Wrong y unit label code in the SPC file. It will be set to arbitrary intensity"
        )
        y_unit = None
        y_title = "Arbitrary Intensity"

    if Fexp == b"\x80":
        iexp = None  # floating Point Data
    else:
        iexp = int.from_bytes(Fexp, endian)  # Datablock scaling Exponent

    # set date (from https://github.com/rohanisaac/spc/blob/master/spc/spc.py)
    year = Fdate >> 20
    month = (Fdate >> 16) % (2**4)
    day = (Fdate >> 11) % (2**5)
    hour = (Fdate >> 6) % (2**5)
    minute = Fdate % (2**6)
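    # e.g. Fdate == 2119535198 decodes to year=2021, month=5, day=17,
    # hour=9, minute=30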

    if (year == 0 or month == 0
            or day == 0):  # occurs when acquisition time is not reported
        timestamp = 0
        acqdate = datetime.fromtimestamp(0, tz=None)
        warn(f"No collection time found. Arbitrarily set to {acqdate}")
    else:
        acqdate = datetime(year, month, day, hour, minute)
        timestamp = acqdate.timestamp()

    sres = Fres.decode("utf-8")
    ssource = Fsource.decode("utf-8")

    scmnt = Fcmnt.decode("utf-8")

    # if Fwplanes:
    #     iwtype = int.from_bytes(Fwtype, endian)
    #     if iwtype != 255:
    #         w_unit = x_or_z_unit[ixtype]
    #         w_title = x_or_z_title[ixtype]
    #     else:
    #         w_unit = None
    #         w_title = "Double interferogram"

    if not txvals:  # evenly spaced x data
        spacing = (Flast - Ffirst) / (Fnpts - 1)
        _x = LinearCoord(
            offset=Ffirst,
            increment=spacing,
            size=Fnpts,
            title=x_title,
            units=x_unit,
        )
    else:
        _x = Coord(
            data=np.frombuffer(content,
                               offset=512,
                               dtype=float32_dtype,
                               count=Fnpts),
            title=x_title,
            units=x_unit,
        )

    if iexp is None:
        # 32-bit IEEE floating numbers
        floatY = np.frombuffer(content,
                               offset=544 + txvals * Fnpts * 4,
                               dtype=float32_dtype,
                               count=Fnpts)
    else:
        # fixed point signed fractions
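        # stored as integer fractions of full scale:
        # y = 2**iexp * n / 2**wordsize, with 16- or 32-bit words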
        if tsprec:
            integerY = np.frombuffer(content,
                                     offset=544 + txvals * Fnpts * 4,
                                     dtype=int16_dtype,
                                     count=Fnpts)
            floatY = (2**iexp) * (integerY / (2**16))
        else:
            integerY = np.frombuffer(content,
                                     offset=544 + txvals * Fnpts * 4,
                                     dtype=int32_dtype,
                                     count=Fnpts)
            floatY = (2**iexp) * (integerY / (2**32))

    if Flogoff:  # read log data header
        (
            Logsizd,
            Logsizm,
            Logtxto,
            Logbins,
            Logdsks,
            Logspar,
        ) = struct.unpack(logstc_format.encode("utf-8"),
                          content[Flogoff:Flogoff + 21])

        logtxt = str(content[Flogoff + Logtxto:len(content)].decode("utf-8"))

    # Create NDDataset Object for the series
    dataset = NDDataset(np.expand_dims(floatY, axis=0))
    dataset.name = str(filename)
    dataset.units = y_unit
    dataset.title = y_title
    dataset.origin = "thermo galactic"

    # now add coordinates
    _y = Coord(
        [timestamp],
        title="acquisition timestamp (GMT)",
        units="s",
        labels=([acqdate], [filename]),
    )

    dataset.set_coordset(y=_y, x=_x)

    dataset.description = kwargs.get("description", "Dataset from spc file.\n")
    if ord(Fexper) != 0 and ord(Fexper) != 7:
        dataset.description += "Instrumental Technique: " + technique + "\n"
    if Fres != b"\x00\x00\x00\x00\x00\x00\x00\x00\x00":
        dataset.description += "Resolution: " + sres + "\n"
    if Fsource != b"\x00\x00\x00\x00\x00\x00\x00\x00\x00":
        dataset.description += "Source Instrument: " + ssource + "\n"
    if Fcmnt != b"\x00" * 130:
        dataset.description += "Memo: " + scmnt + "\n"
    if Flogoff:
        if Logtxto:
            dataset.description += "Log Text: \n---------\n"
            dataset.description += logtxt
            dataset.description += "---------\n"
        if Logbins or Logsizd:
            if Logtxto:
                dataset.description += (
                    "Note: The Log block of the spc file also contains: \n")
            else:
                dataset.description += (
                    "Note: The Log block of the spc file contains: \n")
            if Logbins:
                dataset.description += f"a Log binary block of size {Logbins} bytes "
            if Logsizd:
                dataset.description += f"a Log disk block of size {Logsizd} bytes "

    dataset.history = str(datetime.now(
        timezone.utc)) + ":imported from spc file {} ; ".format(filename)

    if y_unit == "Interferogram":
        # interferogram
        dataset.meta.interferogram = True
        dataset.meta.td = list(dataset.shape)
        dataset.x._zpd = Fpeakpt
        dataset.meta.laser_frequency = Quantity("15798.26 cm^-1")
        dataset.x.set_laser_frequency()
        dataset.x._use_time_axis = (
            False  # True to have time, else it will be optical path difference
        )

    fid.close()
    return dataset
Example #14
def _read_opus(*args, **kwargs):
    debug_('Bruker OPUS import')

    dataset, filename = args
    content = kwargs.get('content', None)

    if content:
        fid = io.BytesIO(content)
    else:
        fid = open(filename, 'rb')

    opus_data = _read_data(fid)

    # data
    try:
        npt = opus_data['AB Data Parameter']['NPT']
        data = opus_data["AB"][:npt]
        dataset.data = np.array(data[np.newaxis], dtype='float32')
    except KeyError:
        raise IOError(
            f"{filename} is not an Absorbance spectrum. It cannot be read with the `read_opus` import method"
        )

    # xaxis
    fxv = opus_data['AB Data Parameter']['FXV']
    lxv = opus_data['AB Data Parameter']['LXV']
    # xdata = linspace(fxv, lxv, npt)
    xaxis = LinearCoord.linspace(fxv,
                                 lxv,
                                 npt,
                                 title='wavenumbers',
                                 units='cm^-1')

    # yaxis
    name = opus_data["Sample"]['SNM']
    acqdate = opus_data["AB Data Parameter"]["DAT"]
    acqtime = opus_data["AB Data Parameter"]["TIM"]
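    # TIM presumably looks like '09:30:15.123 (GMT+2)' (inferred from the
    # parsing below)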
    gmt_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])
    date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],
                                  '%d/%m/%Y_%H:%M:%S.%f')
    utc_dt = date_time - timedelta(hours=gmt_offset_hour)
    utc_dt = utc_dt.replace(tzinfo=timezone.utc)
    timestamp = utc_dt.timestamp()
    yaxis = Coord([timestamp],
                  title='acquisition timestamp (GMT)',
                  units='s',
                  labels=([utc_dt], [name]))

    # set dataset's Coordset
    dataset.set_coordset(y=yaxis, x=xaxis)
    dataset.units = 'absorbance'
    dataset.title = 'absorbance'

    # Set name, origin, description and history
    dataset.name = filename.name
    dataset.origin = "opus"
    dataset.description = 'Dataset from opus files. \n'
    dataset.history = str(datetime.now(
        timezone.utc)) + ': import from opus files \n'
    dataset._date = datetime.now(timezone.utc)
    dataset._modified = dataset.date

    return dataset
Example #15
def fft(dataset, size=None, sizeff=None, inv=False, ppm=True, **kwargs):
    """
    Apply a complex fast Fourier transform.

    For multidimensional NDDataset,
    the apodization is by default performed on the last dimension.

    The data in the last dimension MUST be in time-domain (or without dimension)
    or an error is raised.

    To make a reverse Fourier transform, i.e., from frequency to time domain, use the `ifft` transform
    (or equivalently, the `inv=True` parameter).

    Parameters
    ----------
    dataset : |NDDataset|
        The dataset on which to apply the fft transformation.
    size : int, optional
        Size of the transformed dataset dimension - a shorter alias is `si`. By default, the size is the closest
        power of two greater than the data size.
    sizeff : int, optional
        The number of effective data points to take into account for the transformation. By default it is equal to the
        data size, but may be smaller.
    inv : bool, optional, default=False
        If True, an inverse Fourier transform is performed - size parameter is not taken into account.
    ppm : bool, optional, default=True
        If True, and data are from NMR, then a ppm scale is calculated instead of frequency.
    **kwargs
        Optional keyword parameters (see Other Parameters).

    Returns
    -------
    out
        Transformed |NDDataset|.

    Other Parameters
    ----------------
    dim : str or int, optional, default='x'
        Specify on which dimension to apply this method. If `dim` is specified as an integer, it is equivalent
        to the usual `axis` numpy parameter.
    inplace : bool, optional, default=False
        True to make the transform in place. If False, the function returns a new object.
    tdeff : int, optional
        Alias of `sizeff` (specific to NMR). If both `sizeff` and `tdeff` are passed, `sizeff` has priority.

    See Also
    --------
    ifft : Inverse Fourier transform.
    """
    # datatype
    is_nmr = dataset.origin.lower() in [
        "topspin",
    ]
    is_ir = dataset.meta.interferogram

    # On which axis do we want to apply transform (get axis from arguments)
    dim = kwargs.pop("dim", kwargs.pop("axis", -1))
    axis, dim = dataset.get_axis(dim, negative_axis=True)

    # output dataset inplace or not
    inplace = kwargs.pop("inplace", False)
    if not inplace:  # default
        new = dataset.copy()  # copy to be sure not to modify this dataset
    else:
        new = dataset

    # The last dimension is always the dimension on which we apply the fourier transform.
    # If needed, we swap the dimensions to be sure to be in this situation
    swapped = False
    if axis != -1:
        new.swapdims(axis, -1, inplace=True)  # must be done in  place
        swapped = True

    # Select the last coordinates
    x = new.coordset[dim]

    # Perform some dimensionality checking
    error = False
    if (not inv and not x.unitless and not x.dimensionless
            and x.units.dimensionality != "[time]"):
        error_(
            "fft apply only to dimensions with [time] dimensionality or dimensionless coords\n"
            "fft processing was thus cancelled")
        error = True

    elif (inv and not x.unitless and x.units.dimensionality != "1/[time]"
          and not x.dimensionless):
        error_(
            "ifft apply only to dimensions with [frequency] dimensionality or with ppm units "
            "or dimensionless coords.\n ifft processing was thus cancelled")
        error = True

    # Should not be masked
    elif new.is_masked:
        error_(
            "current fft or ifft processing does not support masked data as input.\n processing was thus cancelled"
        )
        error = True

    # Coordinates should be uniformly spaced (linear coordinate)
    if not x.linear:
        # try to linearize it
        x.linear = True
        if not x.linear:
            # linearization failed
            error = True

    if hasattr(x, "_use_time_axis"):
        x._use_time_axis = True  # we need to have dimensionless or time units

    if not error:
        # OK we can proceed

        # time domain size
        td = None
        if not inv:
            td = x.size

        # if no size (or si) parameter, use the size of the data (size is not used for the inverse transform)
        if size is None or inv:
            size = kwargs.get("si", x.size)

        # for NMR data, round the size up to the closest power of two greater than the data size
        if is_nmr:
            size = largest_power_of_2(size)

        # do we have an effective td to apply
        tdeff = sizeff
        if tdeff is None:
            tdeff = kwargs.get("tdeff", td)

        if tdeff is None or tdeff < 5 or tdeff > size:
            tdeff = size

        # Apply the effective size: zero the points beyond tdeff
        new[..., tdeff:] = 0.0

        # Should we work on complex or hypercomplex data
        # interleaved is the case of >2D data (TODO: >2D not yet implemented in ndcomplex.py)
        iscomplex = False
        if axis == -1:
            iscomplex = new.is_complex
        if new.is_quaternion or new.is_interleaved:
            iscomplex = True

        # If we are in NMR we have an additional complication due to the mode
        # of acquisition (sequential mode when ['QSEQ','TPPI','STATES-TPPI'])
        encoding = "undefined"
        if not inv and "encoding" in new.meta:
            encoding = new.meta.encoding[-1]

        qsim = encoding in ["QSIM", "DQD"]
        qseq = "QSEQ" in encoding
        states = "STATES" in encoding
        echoanti = "ECHO-ANTIECHO" in encoding
        tppi = "TPPI" in encoding
        qf = "QF" in encoding

        zf_size(new, size=size, inplace=True)

        # Perform the fft
        if qsim:  # F2 Fourier transform
            data = _fft(new.data)

        elif qseq:
            raise NotImplementedError("QSEQ not yet implemented")

        elif states:
            data = _states_fft(new.data, tppi)

        elif tppi:
            data = _tppi_fft(new.data)

        elif echoanti:
            data = _echoanti_fft(new.data)

        elif qf:
            # we must perform a real Fourier transform of a time domain dataset
            data = _qf_fft(new.data)

        elif iscomplex and inv:
            # We assume no special encoding for inverse complex fft transform
            data = _ifft(new.data)

        elif not iscomplex and not inv and is_ir:
            # transform interferogram
            data = _interferogram_fft(new.data)

        elif not iscomplex and inv:
            raise NotImplementedError("Inverse FFT for real dimension")

        else:
            raise NotImplementedError(
                f"{encoding} not yet implemented. We recommend you to put an issue on "
                f"Github, so we will not forget to work on this!.")

        # We need here to create a new dataset with new shape and axis
        new._data = data
        new.mask = False

        # create new coordinates for the transformed data

        if is_nmr:
            sfo1 = new.meta.sfo1[-1]
            bf1 = new.meta.bf1[-1]
            sf = new.meta.sf[-1]
            sw = new.meta.sw_h[-1]
            if new.meta.nuc1 is not None:
                nuc1 = new.meta.nuc1[-1]
                regex = r"([^a-zA-Z]+)([a-zA-Z]+)"
                m = re.match(regex, nuc1)
                if m is not None:
                    mass = m[1]
                    name = m[2]
                    nucleus = "^{" + mass + "}" + name
                else:
                    nucleus = ""
            else:
                nucleus = ""
        else:
            sfo1 = 0 * ur.Hz
            bf1 = sfo1
            dw = x.spacing  # scalar spacing expected for a linear coordinate
            sw = 1 / 2 / dw
            sf = -sw / 2
            size = size // 2

        if not inv:
            # time to frequency
            sizem = max(size - 1, 1)
            deltaf = -sw / sizem
            first = sfo1 - sf - deltaf * sizem / 2.0

            # newcoord = type(x)(np.arange(size) * deltaf + first)
            newcoord = LinearCoord.arange(size) * deltaf + first
            newcoord.show_datapoints = False
            newcoord.name = x.name
            new.title = "intensity"
            if is_nmr:
                newcoord.title = f"${nucleus}$ frequency"
                newcoord.ito("Hz")
            elif is_ir:
                new._units = None
                newcoord.title = "wavenumbers"
                newcoord.ito("cm^-1")
            else:
                newcoord.title = "frequency"
                newcoord.ito("Hz")

        else:
            # frequency or ppm to time
            sw = abs(x.data[-1] - x.data[0])
            if x.units == "ppm":
                sw = bf1.to("Hz") * sw / 1.0e6
            deltat = (1.0 / sw).to("us")

            newcoord = LinearCoord.arange(size) * deltat
            newcoord.name = x.name
            newcoord.title = "time"
            newcoord.ito("us")

        if is_nmr and not inv:
            newcoord.meta.larmor = bf1  # needed for ppm transformation
            ppm = kwargs.get("ppm", True)
            if ppm:
                newcoord.ito("ppm")
                newcoord.title = rf"$\delta\ {nucleus}$"

        new.coordset[dim] = newcoord

        # update history
        s = "ifft" if inv else "fft"
        new.history = f"{s} applied on dimension {dim}"

        # PHASE ?
        iscomplex = new.is_complex or new.is_quaternion
        if iscomplex and not inv:
            # phase frequency domain

            # if some phase related metadata do not exist yet, initialize them
            new.meta.readonly = False

            if not new.meta.phased:
                new.meta.phased = [False] * new.ndim

            if not new.meta.phc0:
                new.meta.phc0 = [0] * new.ndim

            if not new.meta.phc1:
                new.meta.phc1 = [0] * new.ndim

            if not new.meta.exptc:
                new.meta.exptc = [0] * new.ndim

            if not new.meta.pivot:
                new.meta.pivot = [0] * new.ndim

            # apply the stored phases
            new.pk(inplace=True)

            new.meta.pivot[-1] = abs(new).coordmax(dim=dim)
            new.meta.readonly = True

    # restore original data order if it was swapped
    if swapped:
        new.swapdims(axis, -1, inplace=True)  # must be done inplace

    return new
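# Hedged usage sketch: `fft` is normally reached through the NDDataset API; the
# reader name and file path below are assumptions, not taken from this excerpt.
# import spectrochempy as scp
# fid = scp.read_topspin("nmrdata/1/fid")   # hypothetical path
# spec = fid.fft(size=4096)                 # forward transform with zero-filling
# back = spec.fft(inv=True)                 # inverse transform (same as ifft)

# The helper `largest_power_of_2` is not shown in this excerpt; a plausible
# stdlib-only implementation that rounds up to the next power of two:
def largest_power_of_2(value):
    return 1 << max(value - 1, 0).bit_length()

assert largest_power_of_2(100) == 128
assert largest_power_of_2(128) == 128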
Example #16
def _read_opus(*args, **kwargs):
    debug_("Bruker OPUS import")

    dataset, filename = args
    content = kwargs.get("content", None)

    if content:
        fid = io.BytesIO(content)
    else:
        fid = open(filename, "rb")

    opus_data = _read_data(fid)

    # data
    try:
        npt = opus_data["AB Data Parameter"]["NPT"]
        data = opus_data["AB"][:npt]
        dataset.data = np.array(data[np.newaxis], dtype="float32")
    except KeyError:
        raise IOError(
            f"{filename} is not an Absorbance spectrum. It cannot be read with the `read_opus` import method"
        )
    # todo: read background

    # xaxis
    fxv = opus_data["AB Data Parameter"]["FXV"]
    lxv = opus_data["AB Data Parameter"]["LXV"]
    # xdata = linspace(fxv, lxv, npt)
    xaxis = LinearCoord.linspace(fxv, lxv, npt, title="wavenumbers", units="cm^-1")

    # yaxis
    name = opus_data["Sample"]["SNM"]
    acqdate = opus_data["AB Data Parameter"]["DAT"]
    acqtime = opus_data["AB Data Parameter"]["TIM"]
    gmt_offset_hour = float(acqtime.split("GMT")[1].split(")")[0])
    if len(acqdate.split("/")[0]) == 2:
        date_time = datetime.strptime(
            acqdate + "_" + acqtime.split()[0], "%d/%m/%Y_%H:%M:%S.%f"
        )
    elif len(acqdate.split("/")[0]) == 4:
        date_time = datetime.strptime(
            acqdate + "_" + acqtime.split()[0], "%Y/%m/%d_%H:%M:%S"
        )
    else:
        # guard against date_time being undefined below
        raise ValueError(f"unexpected acquisition date format: {acqdate}")
    utc_dt = date_time - timedelta(hours=gmt_offset_hour)
    utc_dt = utc_dt.replace(tzinfo=timezone.utc)
    timestamp = utc_dt.timestamp()

    yaxis = Coord(
        [timestamp],
        title="acquisition timestamp (GMT)",
        units="s",
        labels=([utc_dt], [name], [filename]),
    )

    # set dataset's Coordset
    dataset.set_coordset(y=yaxis, x=xaxis)
    dataset.units = "absorbance"
    dataset.title = "absorbance"

    # Set name, origin, description and history
    dataset.name = filename.name
    dataset.origin = "opus"
    dataset.description = "Dataset from opus files. \n"
    dataset.history = str(datetime.now(timezone.utc)) + ": import from opus files \n"
    dataset._date = datetime.now(timezone.utc)
    dataset._modified = dataset.date

    return dataset
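# The two-branch date parsing above (dd/mm/YYYY with fractional seconds vs.
# YYYY/mm/dd without) can also be written as a loop over candidate formats;
# a sketch of the same logic, not the library's code:
from datetime import datetime

def _parse_opus_datetime(acqdate, timepart):
    for fmt in ("%d/%m/%Y_%H:%M:%S.%f", "%Y/%m/%d_%H:%M:%S"):
        try:
            return datetime.strptime(acqdate + "_" + timepart, fmt)
        except ValueError:
            continue
    raise ValueError(f"unrecognised OPUS date/time: {acqdate} {timepart}")

print(_parse_opus_datetime("2020/04/21", "14:30:15"))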
Example #17
def plot_2D(dataset, **kwargs):
    """
    Plot of a 2D array.

    Parameters
    ----------
    dataset : :class:`~spectrochempy.ddataset.nddataset.NDDataset`
        The dataset to plot.
    ax : |Axes| instance, optional
        The axes where to plot. The default is the current axes, or a new one if None.
    clear : `bool`, optional, default=`True`
        Should we plot on the axes previously used or create a new figure?
    figsize : tuple, optional
        The figure size expressed as a tuple (w,h) in inch.

    Other Parameters
    ----------------
    method : str, optional among ['stack', 'map', 'image', 'surface', 'waterfall']
        The method of plot of the dataset, which will determine the plotter to use.
        Default is 'stack'.
    fontsize : int, optional
        The font size in pixels, default is 10 (or read from preferences).
    autolayout : `bool`, optional, default=True
        If True, the layout will be set automatically.
    output : str
        A string containing a path to a filename. The output format is deduced
        from the extension of the filename. If the filename has no extension,
        the value of the rc parameter savefig.format is used.
    dpi : [ None | scalar > 0]
        The resolution in dots per inch. If None, it will default to the
        value savefig.dpi in the matplotlibrc file.
    colorbar : `bool`, optional
        Whether to display a colorbar (for map, image and surface plots).
    transposed : `bool`, optional, default=False
        If True, plot the transposed dataset.
    twinx :
    use_plotly : bool, optional
        Should we use plotly instead of matplotlib for plotting? Defaults to
        `preferences.use_plotly` (default=False).
    data_only : `bool`, optional, default=`False`
        Only the plot is done. No addition of axes or label specifications
        (current settings, if any, or automatic settings are kept).
    projections : `bool`, optional, default=False
    style : str, optional, default='notebook'
        Matplotlib stylesheet (use `available_style` to get a list of available
        styles for plotting).
    reverse : `bool` or None, optional, default=None
        In principle, coordinates run from left to right, except for wavenumbers
        (e.g., FTIR spectra) or ppm (e.g., NMR), which spectrochempy
        will try to guess. But if `reverse` is set, then this is the
        setting which will be taken into account.
    x_reverse : `bool` or None, optional, default=None
    kwargs : additional keywords
    """

    # Get preferences
    # ------------------------------------------------------------------------------------------------------------------

    prefs = dataset.preferences

    # before going further, check if the style is passed in the parameters
    style = kwargs.pop('style', None)
    if style is not None:
        prefs.style = style
    # else we assume this has been set before calling plot()

    prefs.set_latex_font(prefs.font.family)  # reset latex settings

    # Redirections ?
    # ------------------------------------------------------------------------------------------------------------------

    # should we redirect the plotting to another method
    if dataset._squeeze_ndim < 2:
        return dataset.plot_1D(**kwargs)

    # if plotly execute plotly routine not this one
    if kwargs.get('use_plotly', prefs.use_plotly):
        return dataset.plotly(**kwargs)

    # Method of plot
    # ------------------------------------------------------------------------------------------------------------------

    method = kwargs.get('method', prefs.method_2D)
    # do not display colorbar if it's not a surface plot
    # except if we have asked to do so

    # often we need to plot only the data when plotting on top of a previous plot
    data_only = kwargs.get('data_only', False)

    # Get the data to plot
    # -------------------------------------------------------------------------------------------------------------------

    # if we want to plot the transposed dataset
    transposed = kwargs.get('transposed', False)
    if transposed:
        new = dataset.copy().T  # transpose dataset
        nameadd = '.T'
    else:
        new = dataset  # .copy()
        nameadd = ''
    new = new.squeeze()

    if kwargs.get('y_reverse', False):
        new = new[::-1]

    # Figure setup
    # ------------------------------------------------------------------------------------------------------------------
    new._figure_setup(ndim=2, **kwargs)

    ax = new.ndaxes['main']
    ax.name = ax.name + nameadd

    # Other properties that can be passed as arguments
    # ------------------------------------------------------------------------------------------------------------------

    lw = kwargs.get('linewidth', kwargs.get('lw', prefs.lines_linewidth))
    alpha = kwargs.get('calpha', prefs.contour_alpha)

    number_x_labels = prefs.number_of_x_labels
    number_y_labels = prefs.number_of_y_labels
    number_z_labels = prefs.number_of_z_labels

    if method in ['waterfall']:
        nxl = number_x_labels * 2
        nyl = number_z_labels * 2
    elif method in ['stack']:
        nxl = number_x_labels
        nyl = number_z_labels
    else:
        nxl = number_x_labels
        nyl = number_y_labels

    ax.xaxis.set_major_locator(MaxNLocator(nbins=nxl))
    ax.yaxis.set_major_locator(MaxNLocator(nbins=nyl))
    if method not in ['surface']:
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')

    # the next lines are to avoid multipliers in axis scale
    formatter = ScalarFormatter(useOffset=False)
    ax.xaxis.set_major_formatter(formatter)
    ax.yaxis.set_major_formatter(formatter)

    # ------------------------------------------------------------------------------------------------------------------
    # Set axis
    # ------------------------------------------------------------------------------------------------------------------

    # set the abscissa axis
    # ------------------------------------------------------------------------------------------------------------------
    # the actual dimension name is the last in the new.dims list
    dimx = new.dims[-1]
    x = getattr(new, dimx)
    if x is not None and x.implements('CoordSet'):
        # if several coords, take the default ones:
        x = x.default
    xsize = new.shape[-1]
    show_x_points = False
    if x is not None and hasattr(x, 'show_datapoints'):
        show_x_points = x.show_datapoints
    if show_x_points:
        # remove data and units for display
        x = LinearCoord.arange(xsize)

    discrete_data = False

    if x is not None and (not x.is_empty or x.is_labeled):
        xdata = x.data
        if not np.any(xdata):
            if x.is_labeled:
                discrete_data = True
                # take into account the fact that sometimes axes have just labels
                xdata = range(1, len(x.labels) + 1)
    else:
        xdata = range(xsize)

    xl = [xdata[0], xdata[-1]]
    xl.sort()

    if xsize < number_x_labels + 1:
        # extend the axis so that the labels are not too close to the limits
        inc = abs(xdata[1] - xdata[0]) * .5
        xl = [xl[0] - inc, xl[1] + inc]

    if data_only:
        xl = ax.get_xlim()

    xlim = list(kwargs.get('xlim', xl))
    xlim.sort()
    xlim[-1] = min(xlim[-1], xl[-1])
    xlim[0] = max(xlim[0], xl[0])

    if kwargs.get('x_reverse', kwargs.get('reverse',
                                          x.reversed if x else False)):
        xlim.reverse()

    ax.set_xlim(xlim)

    xscale = kwargs.get("xscale", "linear")
    ax.set_xscale(xscale)  # , nonpositive='mask')

    # set the ordinates axis
    # ------------------------------------------------------------------------------------------------------------------
    # the actual dimension name is the second in the new.dims list
    dimy = new.dims[-2]
    y = getattr(new, dimy)
    if y is not None and y.implements('CoordSet'):
        # if several coords, take the default ones:
        y = y.default
    ysize = new.shape[-2]

    show_y_points = False
    if y is not None and hasattr(y, 'show_datapoints'):
        show_y_points = y.show_datapoints
    if show_y_points:
        # remove data and units for display
        y = LinearCoord.arange(ysize)

    if y is not None and (not y.is_empty or y.is_labeled):
        ydata = y.data

        if not np.any(ydata):
            if y.is_labeled:
                ydata = range(1, len(y.labels) + 1)
    else:
        ydata = range(ysize)

    yl = [ydata[0], ydata[-1]]
    yl.sort()

    if ysize < number_y_labels + 1:
        # extend the axis so that the labels are not too close to the limits
        inc = abs(ydata[1] - ydata[0]) * .5
        yl = [yl[0] - inc, yl[1] + inc]

    if data_only:
        yl = ax.get_ylim()

    ylim = list(kwargs.get("ylim", yl))
    ylim.sort()
    ylim[-1] = min(ylim[-1], yl[-1])
    ylim[0] = max(ylim[0], yl[0])

    yscale = kwargs.get("yscale", "linear")
    ax.set_yscale(yscale)

    # z intensity (by default we plot real component of the data)
    # ------------------------------------------------------------------------------------------------------------------

    if not kwargs.get('imag', False):
        zdata = new.real.masked_data
    else:
        zdata = new.RI.masked_data  # new.imag.masked_data #TODO: quaternion case (3 imag.components)

    zlim = kwargs.get('zlim', (np.ma.min(zdata), np.ma.max(zdata)))

    if method in ['stack', 'waterfall']:

        # the z axis info
        # ---------------
        # zl = (np.min(np.ma.min(ys)), np.max(np.ma.max(ys)))
        amp = 0  # np.ma.ptp(zdata) / 50.
        zl = (np.min(np.ma.min(zdata) - amp), np.max(np.ma.max(zdata)) + amp)
        zlim = list(kwargs.get('zlim', zl))
        zlim.sort()
        z_reverse = kwargs.get('z_reverse', False)
        if z_reverse:
            zlim.reverse()

        # set the limits
        # ---------------

        if yscale == "log" and min(zlim) <= 0:
            # set the limits wrt smallest and largest strictly positive values
            ax.set_ylim(10**(int(np.log10(np.amin(np.abs(zdata)))) - 1),
                        10**(int(np.log10(np.amax(np.abs(zdata)))) + 1))
        else:
            ax.set_ylim(zlim)

    else:

        # the y axis info
        # ----------------
        if data_only:
            ylim = ax.get_ylim()

        ylim = list(kwargs.get('ylim', ylim))
        ylim.sort()
        y_reverse = kwargs.get('y_reverse', y.reversed if y else False)
        if y_reverse:
            ylim.reverse()

        # set the limits
        # ----------------
        ax.set_ylim(ylim)

    # ------------------------------------------------------------------------------------------------------------------
    # plot the dataset
    # ------------------------------------------------------------------------------------------------------------------
    ax.grid(prefs.axes_grid)

    normalize = kwargs.get('normalize', None)
    cmap = kwargs.get('colormap', kwargs.get('cmap', prefs.colormap))

    if method in ['map', 'image', 'surface']:
        zmin, zmax = zlim
        zmin = min(zmin, -zmax)
        zmax = max(-zmin, zmax)
        norm = mpl.colors.Normalize(vmin=zmin, vmax=zmax)

    if method in ['surface']:
        X, Y = np.meshgrid(xdata, ydata)
        Z = zdata.copy()

        # masked data are not taken into account in the surface plot
        Z[dataset.mask] = np.nan

        # Plot the surface.  #TODO : improve this (or remove it)

        antialiased = kwargs.get('antialiased', prefs.antialiased)
        rcount = kwargs.get('rcount', prefs.rcount)
        ccount = kwargs.get('ccount', prefs.ccount)
        ax.set_facecolor('w')
        ax.plot_surface(
            X,
            Y,
            Z,
            cmap=cmap,
            linewidth=lw,
            antialiased=antialiased,
            rcount=rcount,
            ccount=ccount,
            edgecolor='k',
            norm=norm,
        )

    if method in ['waterfall']:
        _plot_waterfall(ax, new, xdata, ydata, zdata, prefs, xlim, ylim, zlim,
                        **kwargs)

    elif method in ['image']:

        cmap = kwargs.get('cmap', kwargs.get('image_cmap', prefs.image_cmap))
        if discrete_data:
            method = 'map'

        else:
            kwargs['nlevels'] = 500
            if not hasattr(new, 'clevels') or new.clevels is None:
                new.clevels = _get_clevels(zdata, prefs, **kwargs)
            c = ax.contourf(xdata, ydata, zdata, new.clevels, alpha=alpha)
            c.set_cmap(cmap)
            c.set_norm(norm)

    elif method in ['map']:
        if discrete_data:

            _colormap = plt.get_cmap(cmap)
            scalarMap = mpl.cm.ScalarMappable(norm=norm, cmap=_colormap)

            # marker = kwargs.get('marker', kwargs.get('m', None))
            markersize = kwargs.get('markersize', kwargs.get('ms', 5.))
            # markevery = kwargs.get('markevery', kwargs.get('me', 1))

            for i in ydata:
                for j in xdata:
                    li, = ax.plot(j,
                                  i,
                                  lw=lw,
                                  marker='o',
                                  markersize=markersize)
                    li.set_color(scalarMap.to_rgba(zdata[i - 1, j - 1]))

        else:
            # contour plot
            # -------------
            if not hasattr(new, 'clevels') or new.clevels is None:
                new.clevels = _get_clevels(zdata, prefs, **kwargs)

            c = ax.contour(xdata,
                           ydata,
                           zdata,
                           new.clevels,
                           linewidths=lw,
                           alpha=alpha)
            c.set_cmap(cmap)
            c.set_norm(norm)

    elif method in ['stack']:

        # stack plot
        # ----------

        # now plot the collection of lines
        # --------------------------------
        # map colors using the colormap

        vmin, vmax = ylim
        norm = mpl.colors.Normalize(vmin=vmin,
                                    vmax=vmax)  # we normalize to the max time
        if normalize is not None:
            norm.vmax = normalize

        _colormap = plt.get_cmap(cmap)
        scalarMap = mpl.cm.ScalarMappable(norm=norm, cmap=_colormap)

        # we display the lines in reverse order, so that the last
        # ones are behind the first.

        clear = kwargs.get('clear', True)
        lines = []
        if not clear and not transposed:
            lines.extend(ax.lines)  # keep the old lines

        line0, = ax.plot(xdata, zdata[0], lw=lw, picker=True)

        for i in range(zdata.shape[0]):
            li = cpy(line0)
            li.set_ydata(zdata[i])
            lines.append(li)
            li.set_color(scalarMap.to_rgba(ydata[i]))
            fmt = kwargs.get('label_fmt', "{:.5f}")
            li.set_label(fmt.format(ydata[i]))
            li.set_zorder(zdata.shape[0] + 1 - i)

        # store the full set of lines
        new._ax_lines = lines[:]

        # but display only a subset of them in order to accelerate the drawing
        maxlines = kwargs.get('maxlines', prefs.max_lines_in_stack)
        setpy = max(len(new._ax_lines) // maxlines, 1)
        ax.lines = new._ax_lines[::setpy]  # displayed ax lines

    if data_only or method in ['waterfall']:
        # if data_only, we do not set axes and labels;
        # it was probably done already in a previous plot
        new._plot_resume(dataset, **kwargs)
        return ax

    # display a title
    # ------------------------------------------------------------------------------------------------------------------
    title = kwargs.get('title', None)
    if title:
        ax.set_title(title)
    elif kwargs.get('plottitle', False):
        ax.set_title(new.name)

    # ------------------------------------------------------------------------------------------------------------------
    # labels
    # ------------------------------------------------------------------------------------------------------------------

    # x label
    # ------------------------------------------------------------------------------------------------------------------
    xlabel = kwargs.get("xlabel", None)
    if show_x_points:
        xlabel = 'data points'
    if not xlabel:
        xlabel = make_label(x, new.dims[-1])
    ax.set_xlabel(xlabel)

    uselabelx = kwargs.get('uselabel_x', False)
    if x and x.is_labeled and (uselabelx or not np.any(x.data)) and len(
            x.labels) < number_x_labels + 1:
        # TODO refine this to use different orders of labels
        ax.set_xticks(xdata)
        ax.set_xticklabels(x.labels)

    # y label
    # ------------------------------------------------------------------------------------------------------------------
    ylabel = kwargs.get("ylabel", None)
    if show_y_points:
        ylabel = 'data points'
    if not ylabel:
        if method in ['stack']:
            ylabel = make_label(new, 'values')

        else:
            ylabel = make_label(y, new.dims[-2])
            # y tick labels
            uselabely = kwargs.get('uselabel_y', False)
            if y and y.is_labeled and (uselabely or not np.any(
                    y.data)) and len(y.labels) < number_y_labels:
                # TODO refine this to use different orders of labels
                ax.set_yticks(ydata)
                ax.set_yticklabels(y.labels)

    # z label
    # ------------------------------------------------------------------------------------------------------------------
    zlabel = kwargs.get("zlabel", None)
    if not zlabel:
        if method in ['stack']:
            zlabel = make_label(y, new.dims[-2])
        elif method in ['surface']:
            zlabel = make_label(new, 'values')
            ax.set_zlabel(zlabel)
        else:
            zlabel = make_label(new, 'z')

    # do we display the ordinate axis?
    if kwargs.get('show_y', True):
        ax.set_ylabel(ylabel)
    else:
        ax.set_yticks([])

    if 'colorbar' in new.ndaxes:
        if 'surface' not in method and (not hasattr(new, '_axcb')
                                        or not new._axcb):
            axec = new.ndaxes['colorbar']
            axec.name = axec.name + nameadd
            new._axcb = mpl.colorbar.ColorbarBase(axec,
                                                  cmap=plt.get_cmap(cmap),
                                                  norm=norm)
            new._axcb.set_label(zlabel)
    #        else:
    #            new._fig.colorbar(surf, shrink=0.5, aspect=10)

    # do we display the zero line
    if kwargs.get('show_zero', False):
        ax.haxlines()

    new._plot_resume(dataset, **kwargs)

    return ax
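# Hedged usage sketch for the plot_2D above: assuming a 2D NDDataset `ds` (e.g.,
# a stack of spectra loaded with one of the readers in this collection), the main
# keyword arguments are exercised like this; the reader call is an assumption.
# import spectrochempy as scp
# ds = scp.read("irdata")                      # hypothetical 2D dataset
# ax = plot_2D(ds, method="map", cmap="viridis",
#              xlim=(4000.0, 1000.0), x_reverse=True,
#              title="FTIR map", colorbar=True)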
Example #18
def _read_topspin(*args, **kwargs):
    debug_("Bruker TOPSPIN file reading")
    dataset, path = args
    #    content = kwargs.get('content', None)

    # is it a processed dataset (1r, 2rr, ...)?
    processed = path.match("pdata/*/*")

    # ------------------------------------------------------------------------
    # start reading ....
    # ------------------------------------------------------------------------

    parents = path.parents

    # Get data and acquisition parameters

    if not processed:
        # a fid or a ser has been selected
        f_expno = parents[0]
        expno = f_expno.name
        procno = kwargs.get("procno", "1")
        f_procno = f_expno / "pdata" / procno
        f_name = parents[1]

    else:
        # a processed spectrum has been selected (1r, ...)
        f_procno = parents[0]
        procno = f_procno.name
        f_expno = parents[2]
        expno = f_expno.name
        f_name = parents[3]

    acqus_files = _get_files(f_expno, "acqu")
    procs_files = _get_files(f_procno, "proc")

    if not processed:

        dic, data = read_fid(f_expno,
                             acqus_files=acqus_files,
                             procs_files=procs_files)

        # apply a -90 phase shift to be compatible with topspin
        data = data * np.exp(-1j * np.pi / 2.0)

        # Handle the case when the reshaping was not correct:
        # for example, this happens when the number
        # of accumulated rows is incomplete
        if path.name in ["ser"] and data.ndim == 1:
            # we must reshape using the acqu parameters
            td1 = dic["acqu2"]["TD"]
            try:
                data = data.reshape(td1, -1)
            except ValueError:
                try:
                    td = dic["acqu"]["TD"] // 2
                    data = data.reshape(-1, td)
                except ValueError:
                    raise KeyError("Inconsistency between TD's and data size")

            # reduce to td
            ntd = dic["acqus"]["TD"] // 2
            data = data[..., :ntd]

        # Eliminate the digital filter
        if kwargs.get("remove_digital_filter",
                      True) and dic["acqus"]["DECIM"] > 1:
            data = _remove_digital_filter(dic, data)

    else:

        dic, datalist = read_pdata(
            f_procno,
            acqus_files=acqus_files,
            procs_files=procs_files,
            all_components=True,
        )
        if isinstance(datalist, list):
            if datalist[0].ndim == 2:
                data, dataRI, dataIR, dataII = datalist
                # make quaternion
                shape = data.shape
                data = as_quat_array(
                    list(
                        zip(
                            data.flatten(),
                            dataRI.flatten(),
                            dataIR.flatten(),
                            dataII.flatten(),
                        )))
                data = data.reshape(shape)

            elif datalist[0].ndim == 1:
                # make complex
                data, dataI = datalist
                data = data + dataI * 1.0j

            else:
                return None
        else:
            data = datalist

    # ........................................................................................................
    # we now rearrange the dic to get something more user-friendly
    # we assume that all experiments have similar (important) parameters so that the experiments are compatible

    meta = Meta()  # This is the parameter dictionary
    datatype = path.name.upper() if not processed else f"{data.ndim}D"

    keys = sorted(dic.keys())

    # we need the ndim of the data
    parmode = int(dic["acqus"].get("PARMODE", data.ndim - 1))
    if parmode + 1 != data.ndim:
        raise KeyError(
            f"The NMR data were not read properly as the PARMODE+1 parameter ({parmode + 1}) doesn't fit"
            f" the actual number of dimensions ({data.ndim})")

    # read the acqu and proc
    valid_keys = list(zip(*nmr_valid_meta))[0]
    keys_units = dict(nmr_valid_meta)

    for item in keys:

        if item[:4] in ["acqu", "proc"]:
            dim = parmode
            if len(item) > 4 and item[4] in ["2", "3"]:
                dim = parmode + 1 - int(item[4])

            for key in sorted(dic[item]):

                if key.startswith("_") or key.lower() not in valid_keys:
                    continue

                value = dic[item][key]
                units = ur(keys_units[key.lower()]) if keys_units[
                    key.lower()] else None

                if units is not None:
                    if isinstance(value, (float, int)):
                        value = value * units  # make a quantity
                    elif isinstance(value, list) and isinstance(
                            value[0], (float, int)):
                        value = np.array(value) * units

                if key.lower() not in meta:
                    meta[key.lower()] = [None] * data.ndim

                try:
                    meta[key.lower()][dim] = value
                except Exception:
                    pass

        else:

            meta[item.lower()] = dic[item]

    # Warning: from now on, all parameter keys are lowercase.

    # correct some initial values

    meta.encoding = [0] * (parmode + 1)
    meta.iscomplex = [False] * (parmode + 1)

    if not processed:
        meta.isfreq = [False]
        meta.encoding[-1] = AQ_mod[meta.aq_mod[-1]]
        meta.iscomplex[-1] = meta.aq_mod[-1] > 0

    if datatype in ["SER"]:
        meta.isfreq.insert(0, False)

        if meta.fnmode[-2] == 0:
            # For historical reasons,
            # MC2 is interpreted when the acquisition status
            # parameter FnMODE has the value undefined, i.e. 0
            if meta.mc2 is not None:
                meta.fnmode[-2] = meta.mc2[-2] + 1

        meta.encoding[-2] = FnMODE[meta.fnmode[-2]]
        meta.iscomplex[-2] = meta.fnmode[-2] > 1

        if parmode == 2:
            meta.isfreq.insert(0, False)
            if meta.fnmode[-3] == 0 and meta.mc2 is not None:
                meta.fnmode[-3] = meta.mc2[-3] + 1
            meta.encoding[-3] = FnMODE[meta.fnmode[-3]]
            meta.iscomplex[-3] = meta.fnmode[-3] > 1

    # correct TD, so it is the number of complex points, not the number of data points
    # (not for the last dimension, which is already correct)
    meta.tdeff = meta.td[:]
    meta.td = list(data.shape)

    for axis in range(parmode + 1):
        if meta.iscomplex[axis]:
            if axis != parmode:  # already done for last axis
                meta.td[axis] = meta.td[axis] // 2
            meta.tdeff[axis] = meta.tdeff[axis] // 2

    meta.sw_h = [(meta.sw[axis].m * meta.sfo1[axis] * 1e-6).to("Hz")
                 for axis in range(parmode + 1)]

    if processed:
        meta.si = [si for si in data.shape]
        meta.isfreq = [True] * (parmode + 1)  # at least we assume this
        meta.phc0 = [0] * data.ndim

    # this transformation is to make the data coherent with Bruker processing
    if meta.iscomplex[-1]:
        data = np.conj(data * np.exp(np.pi * 1j / 2.0))

    # normalise amplitudes to ns=1 and rg=1
    def _norm(dat):
        meta.ns = meta.get(
            "ns",
            [1] * data.ndim)  # sometimes these parameters are not present
        meta.rg = meta.get("rg", [1.0] * data.ndim)
        fac = float(meta.ns[-1]) * float(meta.rg[-1])
        meta.rgold = [meta.rg[-1]]
        meta.rg[-1] = 1.0
        meta.nsold = [meta.ns[-1]]  # store the old value of NS
        meta.ns[-1] = 1
        dat /= fac
        return dat

    data = _norm(data)

    # add some additional information in meta
    meta.expno = [int(expno)]

    # and the metadata (and make them readonly)
    meta.datatype = datatype
    meta.pathname = str(path)

    # add two parameters needed for phasing
    meta.pivot = [0] * data.ndim
    meta.exptc = [0] * data.ndim

    # make the corresponding axis
    # debug_('Create coords...')
    coords = []
    axe_range = list(range(parmode + 1))

    for axis in axe_range:
        if not meta.isfreq[axis]:
            # the axis is in time units
            dw = (1.0 / meta.sw_h[axis]).to("us")
            # coordpoints = np.arange(meta.td[axis])
            # coord = Coord(coordpoints * dw,
            #             title=f"F{axis + 1} acquisition time")  # TODO: use AQSEQ for >2D data
            coord = LinearCoord(
                offset=0.0,
                increment=dw,
                units="us",
                size=meta.td[axis],
                title=f"F{axis + 1} acquisition time",
            )
            coord.meta.larmor = meta.sfo1[axis]
            coords.append(coord)
        else:
            size = meta.si[axis]
            sizem = max(size - 1, 1)
            deltaf = -meta.sw_h[axis] / sizem
            first = meta.sfo1[axis] - meta.sf[axis] - deltaf * sizem / 2.0

            # coord = Coord(np.arange(size) * deltaf + first)
            coord = LinearCoord(offset=first, increment=deltaf, size=size)
            coord.meta.larmor = meta.sfo1[
                axis]  # needed for ppm transformation
            coord.ito("ppm")
            if meta.nuc1 is not None:
                nuc1 = meta.nuc1[axis]
                regex = r"([^a-zA-Z]+)([a-zA-Z]+)"
                m = re.match(regex, nuc1)
                mass = m[1]
                name = m[2]
                nucleus = "^{" + mass + "}" + name
            else:
                nucleus = ""
            coord.title = rf"$\delta\ {nucleus}$"
            coords.append(coord)

    dataset.data = data

    for axis, cplex in enumerate(meta.iscomplex[::-1]):
        if cplex and axis > 0:
            dataset.set_quaternion(inplace=True)

    dataset.meta.update(meta)
    dataset.meta.readonly = True
    dataset.set_coordset(*tuple(coords))

    dataset.title = "intensity"
    dataset.origin = "topspin"
    dataset.name = f"{f_name.name} expno:{expno} procno:{procno} ({datatype})"
    dataset.filename = f_name

    return dataset
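# The nucleus formatting above splits the mass number from the element symbol
# with a simple regex; a standalone check (stdlib only):
import re

regex = r"([^a-zA-Z]+)([a-zA-Z]+)"
for nuc1 in ("1H", "13C", "29Si"):
    m = re.match(regex, nuc1)
    nucleus = "^{" + m[1] + "}" + m[2]  # e.g. ^{13}C, ready for LaTeX titles
    print(nucleus)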
Example #19
def _read_txt(*args, **kwargs):
    # read Labspec *.txt files or series

    dataset, filename = args
    content = kwargs.get("content", False)

    if content:
        pass
        # fid = io.StringIO(content)
        # TODO: get the list of strings

    else:
        fid = open(filename, "r", encoding="utf-8")
        try:
            lines = fid.readlines()
        except UnicodeDecodeError:
            fid = open(filename, "r", encoding="latin-1")
            lines = fid.readlines()
            fid.close()

    if len(lines) == 0:
        return

    # Metadata
    meta = Meta()

    i = 0
    while lines[i].startswith("#"):
        key, val = lines[i].split("=")
        key = key[1:]
        if key in meta.keys():
            key = f"{key} {i}"
        meta[key] = val.strip()
        i += 1

    # .txt extension is fairly common. We determine non-labspec files based
    # on the absence of a few keys. Two types of files (1D or 2D) are considered:
    labspec_keys_1D = ["Acq. time (s)", "Dark correction"]
    labspec_keys_2D = ["Exposition", "Grating"]

    if all(keywd in meta.keys() for keywd in labspec_keys_1D):
        pass
    elif all(keywd in meta.keys() for keywd in labspec_keys_2D):
        pass
    else:
        # this is not a labspec txt file"
        return

    # read spec
    rawdata = np.genfromtxt(lines[i:], delimiter="\t")

    # populate the dataset
    if rawdata.shape[1] == 2:
        data = rawdata[:, 1][np.newaxis]
        _x = Coord(rawdata[:, 0], title="Raman shift", units="1/cm")
        _y = Coord(None, title="Time", units="s")
        date_acq, _y = _transf_meta(_y, meta)

    else:
        data = rawdata[1:, 1:]
        _x = Coord(rawdata[0, 1:], title="Raman shift", units="1/cm")
        _y = Coord(rawdata[1:, 0], title="Time", units="s")
        date_acq, _y = _transf_meta(_y, meta)

    # try to transform to linear coord
    _x.linear = True

    # if successful, linear should still be True
    if _x.linear:
        _x = LinearCoord(_x)

    # set dataset metadata
    dataset.data = data
    dataset.set_coordset(y=_y, x=_x)
    dataset.title = "Counts"
    dataset.units = None
    dataset.name = filename.stem
    dataset.meta = meta

    # date_acq is Acquisition date at start (first moment of acquisition)
    dataset.description = "Spectrum acquisition : " + str(date_acq)

    # Set the NDDataset date
    dataset._date = datetime.datetime.now(datetime.timezone.utc)
    dataset._modified = dataset.date

    # Set origin, description and history
    dataset.history = f"{dataset.date}:imported from LabSpec6 text file {filename}"

    return dataset
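# The metadata loop above consumes leading "#key=value" lines; a minimal
# stand-alone sketch with a fake header (illustrative values only):
lines = [
    "#Acq. time (s)=30\n",
    "#Dark correction=Off\n",
    "100.0\t250\n",
]
meta, i = {}, 0
while lines[i].startswith("#"):
    key, val = lines[i].split("=")
    key = key[1:]              # drop the leading '#'
    if key in meta:            # disambiguate duplicated keys
        key = f"{key} {i}"
    meta[key] = val.strip()
    i += 1
print(meta)  # {'Acq. time (s)': '30', 'Dark correction': 'Off'}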
Example #20
def plot_2D(dataset, method=None, **kwargs):
    """
    Plot of a 2D array.

    Parameters
    ----------
    dataset : |NDDataset|
        The dataset to plot.
    method : ['stack', 'map', 'image'] , optional
        The method of plot of the dataset, which will determine the plotter to use.
        The default method is 'stack', but this can be changed using
        ``dataset.preferences.method_2D``.
    **kwargs
        Optional keyword parameters (see Other Parameters).

    Other Parameters
    ----------------
    {0}

    See Also
    --------
    plot_map
    plot_stack
    plot_image
    plot_surface
    plot_waterfall
    """

    # Get preferences
    # ------------------------------------------------------------------------

    prefs = dataset.preferences

    # before going further, check if the style is passed in the parameters
    style = kwargs.pop("style", None)
    if style is not None:
        prefs.style = style
    # else we assume this has been set before calling plot()

    prefs.set_latex_font(prefs.font.family)  # reset latex settings

    # Redirections ?
    # ------------------------------------------------------------------------

    # should we redirect the plotting to another method
    if dataset._squeeze_ndim < 2:
        return dataset.plot_1D(**kwargs)

    # if plotly execute plotly routine not this one
    if kwargs.get("use_plotly", prefs.use_plotly):
        return dataset.plotly(**kwargs)

    # do not display colorbar if it's not a surface plot
    # except if we have asked to do so

    # often we need to plot only the data when plotting on top of a previous plot
    data_only = kwargs.get("data_only", False)

    # Get the data to plot
    # ---------------------------------------------------------------

    # if we want to plot the transposed dataset
    transposed = kwargs.get("transposed", False)
    if transposed:
        new = dataset.copy().T  # transpose dataset
        nameadd = ".T"
    else:
        new = dataset  # .copy()
        nameadd = ""
    new = new.squeeze()

    if kwargs.get("y_reverse", False):
        new = new[::-1]

    # Figure setup
    # ------------------------------------------------------------------------
    method = new._figure_setup(ndim=2, method=method, **kwargs)

    ax = new.ndaxes["main"]
    ax.name = ax.name + nameadd

    # Other properties that can be passed as arguments
    # ------------------------------------------------------------------------

    lw = kwargs.get("linewidth", kwargs.get("lw", prefs.lines_linewidth))
    alpha = kwargs.get("calpha", prefs.contour_alpha)

    number_x_labels = prefs.number_of_x_labels
    number_y_labels = prefs.number_of_y_labels
    number_z_labels = prefs.number_of_z_labels

    if method in ["waterfall"]:
        nxl = number_x_labels * 2
        nyl = number_z_labels * 2
    elif method in ["stack"]:
        nxl = number_x_labels
        nyl = number_z_labels
    else:
        nxl = number_x_labels
        nyl = number_y_labels

    ax.xaxis.set_major_locator(MaxNLocator(nbins=nxl))
    ax.yaxis.set_major_locator(MaxNLocator(nbins=nyl))
    if method not in ["surface"]:
        ax.xaxis.set_ticks_position("bottom")
        ax.yaxis.set_ticks_position("left")

    # the next lines are to avoid multipliers in axis scale
    formatter = ScalarFormatter(useOffset=False)
    ax.xaxis.set_major_formatter(formatter)
    ax.yaxis.set_major_formatter(formatter)

    # ------------------------------------------------------------------------
    # Set axis
    # ------------------------------------------------------------------------

    # set the abscissa axis
    # ------------------------------------------------------------------------
    # the actual dimension name is the last in the new.dims list
    dimx = new.dims[-1]
    x = getattr(new, dimx)
    if x is not None and x.implements("CoordSet"):
        # if several coords, take the default ones:
        x = x.default
    xsize = new.shape[-1]
    show_x_points = False
    if x is not None and hasattr(x, "show_datapoints"):
        show_x_points = x.show_datapoints
    if show_x_points:
        # remove data and units for display
        x = LinearCoord.arange(xsize)

    discrete_data = False

    if x is not None and (not x.is_empty or x.is_labeled):
        xdata = x.data
        if not np.any(xdata):
            if x.is_labeled:
                discrete_data = True
                # take into account the fact that sometimes axes have just labels
                xdata = range(1, len(x.labels) + 1)
    else:
        xdata = range(xsize)

    xl = [xdata[0], xdata[-1]]
    xl.sort()

    if xsize < number_x_labels + 1:
        # extend the axis so that the labels are not too close to the limits
        inc = abs(xdata[1] - xdata[0]) * 0.5
        xl = [xl[0] - inc, xl[1] + inc]

    if data_only:
        xl = ax.get_xlim()

    xlim = list(kwargs.get("xlim", xl))
    xlim.sort()
    xlim[-1] = min(xlim[-1], xl[-1])
    xlim[0] = max(xlim[0], xl[0])

    if kwargs.get("x_reverse", kwargs.get("reverse", x.reversed if x else False)):
        xlim.reverse()

    ax.set_xlim(xlim)

    xscale = kwargs.get("xscale", "linear")
    ax.set_xscale(xscale)  # , nonpositive='mask')

    # set the ordinates axis
    # ------------------------------------------------------------------------
    # the actual dimension name is the second in the new.dims list
    dimy = new.dims[-2]
    y = getattr(new, dimy)
    if y is not None and y.implements("CoordSet"):
        # if several coords, take the default ones:
        y = y.default
    ysize = new.shape[-2]

    show_y_points = False
    if y is not None and hasattr(y, "show_datapoints"):
        show_y_points = y.show_datapoints
    if show_y_points:
        # remove data and units for display
        y = LinearCoord.arange(ysize)

    if y is not None and (not y.is_empty or y.is_labeled):
        ydata = y.data

        if not np.any(ydata):
            if y.is_labeled:
                ydata = range(1, len(y.labels) + 1)
    else:
        ydata = range(ysize)

    yl = [ydata[0], ydata[-1]]
    yl.sort()

    if ysize < number_y_labels + 1:
        # extend the axis so that the labels are not too close to the limits
        inc = abs(ydata[1] - ydata[0]) * 0.5
        yl = [yl[0] - inc, yl[1] + inc]

    if data_only:
        yl = ax.get_ylim()

    ylim = list(kwargs.get("ylim", yl))
    ylim.sort()
    ylim[-1] = min(ylim[-1], yl[-1])
    ylim[0] = max(ylim[0], yl[0])

    yscale = kwargs.get("yscale", "linear")
    ax.set_yscale(yscale)

    # z intensity (by default we plot real component of the data)
    # ------------------------------------------------------------------------

    if not kwargs.get("imag", False):
        zdata = new.real.masked_data
    else:
        zdata = (
            new.RI.masked_data
        )  # new.imag.masked_data #TODO: quaternion case (3 imag.components)

    zlim = kwargs.get("zlim", (np.ma.min(zdata), np.ma.max(zdata)))

    if method in ["stack", "waterfall"]:

        # the z axis info
        # ---------------
        # zl = (np.min(np.ma.min(ys)), np.max(np.ma.max(ys)))
        amp = 0  # np.ma.ptp(zdata) / 50.
        zl = (np.min(np.ma.min(zdata) - amp), np.max(np.ma.max(zdata)) + amp)
        zlim = list(kwargs.get("zlim", zl))
        zlim.sort()
        z_reverse = kwargs.get("z_reverse", False)
        if z_reverse:
            zlim.reverse()

        # set the limits
        # ---------------

        if yscale == "log" and min(zlim) <= 0:
            # set the limits wrt smallest and largest strictly positive values
            ax.set_ylim(
                10 ** (int(np.log10(np.amin(np.abs(zdata)))) - 1),
                10 ** (int(np.log10(np.amax(np.abs(zdata)))) + 1),
            )
        else:
            ax.set_ylim(zlim)

    else:

        # the y axis info
        # ----------------
        if data_only:
            ylim = ax.get_ylim()

        ylim = list(kwargs.get("ylim", ylim))
        ylim.sort()
        y_reverse = kwargs.get("y_reverse", y.reversed if y else False)
        if y_reverse:
            ylim.reverse()

        # set the limits
        # ----------------
        ax.set_ylim(ylim)

    # ------------------------------------------------------------------------
    # plot the dataset
    # ------------------------------------------------------------------------
    ax.grid(prefs.axes_grid)

    normalize = kwargs.get("normalize", None)
    cmap = kwargs.get("colormap", kwargs.get("cmap", prefs.colormap))

    if method in ["map", "image", "surface"]:
        zmin, zmax = zlim
        zmin = min(zmin, -zmax)
        zmax = max(-zmin, zmax)
        norm = mpl.colors.Normalize(vmin=zmin, vmax=zmax)

    if method in ["surface"]:
        X, Y = np.meshgrid(xdata, ydata)
        Z = zdata.copy()

        # masked data are not taken into account in the surface plot
        Z[dataset.mask] = np.nan

        # Plot the surface.  #TODO : improve this (or remove it)

        antialiased = kwargs.get("antialiased", prefs.antialiased)
        rcount = kwargs.get("rcount", prefs.rcount)
        ccount = kwargs.get("ccount", prefs.ccount)
        ax.set_facecolor("w")
        ax.plot_surface(
            X,
            Y,
            Z,
            cmap=cmap,
            linewidth=lw,
            antialiased=antialiased,
            rcount=rcount,
            ccount=ccount,
            edgecolor="k",
            norm=norm,
        )

    if method in ["waterfall"]:
        _plot_waterfall(ax, new, xdata, ydata, zdata, prefs, xlim, ylim, zlim, **kwargs)

    elif method in ["image"]:

        cmap = kwargs.get("cmap", kwargs.get("image_cmap", prefs.image_cmap))
        if discrete_data:
            method = "map"

        else:
            kwargs["nlevels"] = 500
            if not hasattr(new, "clevels") or new.clevels is None:
                new.clevels = _get_clevels(zdata, prefs, **kwargs)
            c = ax.contourf(xdata, ydata, zdata, new.clevels, alpha=alpha)
            c.set_cmap(cmap)
            c.set_norm(norm)

    elif method in ["map"]:
        if discrete_data:

            _colormap = plt.get_cmap(cmap)
            scalarMap = mpl.cm.ScalarMappable(norm=norm, cmap=_colormap)

            # marker = kwargs.get('marker', kwargs.get('m', None))
            markersize = kwargs.get("markersize", kwargs.get("ms", 5.0))
            # markevery = kwargs.get('markevery', kwargs.get('me', 1))

            for i in ydata:
                for j in xdata:
                    (li,) = ax.plot(j, i, lw=lw, marker="o", markersize=markersize)
                    li.set_color(scalarMap.to_rgba(zdata[i - 1, j - 1]))

        else:
            # contour plot
            # -------------
            if not hasattr(new, "clevels") or new.clevels is None:
                new.clevels = _get_clevels(zdata, prefs, **kwargs)

            c = ax.contour(xdata, ydata, zdata, new.clevels, linewidths=lw, alpha=alpha)
            c.set_cmap(cmap)
            c.set_norm(norm)

    elif method in ["stack"]:

        # stack plot
        # ----------

        # now plot the collection of lines
        # --------------------------------
        # map colors using the colormap

        vmin, vmax = ylim
        norm = mpl.colors.Normalize(
            vmin=vmin, vmax=vmax
        )  # we normalize to the max time
        if normalize is not None:
            norm.vmax = normalize

        _colormap = plt.get_cmap(cmap)
        scalarMap = mpl.cm.ScalarMappable(norm=norm, cmap=_colormap)

        # we display the lines in reverse order, so that the last
        # ones are behind the first.

        clear = kwargs.get("clear", True)
        lines = []
        if not clear and not transposed:
            lines.extend(ax.lines)  # keep the old lines

        line0 = mpl.lines.Line2D(xdata, zdata[0], lw=lw, picker=True)

        for i in range(zdata.shape[0]):
            li = cpy(line0)
            li.set_ydata(zdata[i])
            lines.append(li)
            li.set_color(scalarMap.to_rgba(ydata[i]))
            fmt = kwargs.get("label_fmt", "{:.5f}")
            li.set_label(fmt.format(ydata[i]))
            li.set_zorder(zdata.shape[0] + 1 - i)

        # store the full set of lines
        new._ax_lines = lines[:]

        # but display only a subset of them in order to accelerate the drawing
        maxlines = kwargs.get("maxlines", prefs.max_lines_in_stack)
        setpy = max(len(new._ax_lines) // maxlines, 1)

        for line in new._ax_lines[::setpy]:
            ax.add_line(line)

    if data_only or method in ["waterfall"]:
        # if data_only, we do not set axes and labels;
        # it was probably done already in a previous plot
        new._plot_resume(dataset, **kwargs)
        return ax

    # display a title
    # ------------------------------------------------------------------------
    title = kwargs.get("title", None)
    if title:
        ax.set_title(title)
    elif kwargs.get("plottitle", False):
        ax.set_title(new.name)

    # ------------------------------------------------------------------------
    # labels
    # ------------------------------------------------------------------------

    # x label
    # ------------------------------------------------------------------------
    xlabel = kwargs.get("xlabel", None)
    if show_x_points:
        xlabel = "data points"
    if not xlabel:
        xlabel = make_label(x, new.dims[-1])
    ax.set_xlabel(xlabel)

    uselabelx = kwargs.get("uselabel_x", False)
    if (
        x
        and x.is_labeled
        and (uselabelx or not np.any(x.data))
        and len(x.labels) < number_x_labels + 1
    ):
        # TODO refine this to use different orders of labels
        ax.set_xticks(xdata)
        ax.set_xticklabels(x.labels)

    # y label
    # ------------------------------------------------------------------------
    ylabel = kwargs.get("ylabel", None)
    if show_y_points:
        ylabel = "data points"
    if not ylabel:
        if method in ["stack"]:
            ylabel = make_label(new, "values")

        else:
            ylabel = make_label(y, new.dims[-2])
            # y tick labels
            uselabely = kwargs.get("uselabel_y", False)
            if (
                y
                and y.is_labeled
                and (uselabely or not np.any(y.data))
                and len(y.labels) < number_y_labels
            ):
                # TODO refine this to use different orders of labels
                ax.set_yticks(ydata)
                ax.set_yticklabels(y.labels)

    # z label
    # ------------------------------------------------------------------------
    zlabel = kwargs.get("zlabel", None)
    if not zlabel:
        if method in ["stack"]:
            zlabel = make_label(y, new.dims[-2])
        elif method in ["surface"]:
            zlabel = make_label(new, "values")
            ax.set_zlabel(zlabel)
        else:
            zlabel = make_label(new, "z")

    # do we display the ordinate axis?
    if kwargs.get("show_y", True):
        ax.set_ylabel(ylabel)
    else:
        ax.set_yticks([])

    if "colorbar" in new.ndaxes:
        if "surface" not in method and (not hasattr(new, "_axcb") or not new._axcb):
            axec = new.ndaxes["colorbar"]
            axec.name = axec.name + nameadd
            new._axcb = mpl.colorbar.ColorbarBase(
                axec, cmap=plt.get_cmap(cmap), norm=norm
            )
            new._axcb.set_label(zlabel)
    #        else:
    #            new._fig.colorbar(surf, shrink=0.5, aspect=10)

    # do we display the zero line
    if kwargs.get("show_zero", False):
        ax.haxlines()

    new._plot_resume(dataset, **kwargs)

    return ax
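# The stack branch above colors each line by its y value through a shared
# Normalize + ScalarMappable pair; the mechanism in isolation (matplotlib only,
# synthetic data):
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np

ydata = np.linspace(0.0, 10.0, 5)
norm = mpl.colors.Normalize(vmin=ydata.min(), vmax=ydata.max())
scalarMap = mpl.cm.ScalarMappable(norm=norm, cmap=plt.get_cmap("viridis"))

fig, ax = plt.subplots()
x = np.linspace(0.0, 1.0, 100)
for y in ydata:
    ax.plot(x, np.sin(2 * np.pi * x) + y, color=scalarMap.to_rgba(y))
plt.show()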