Example #1
    def set_laser_frequency(self, frequency=15798.26 * ur("cm^-1")):

        if not isinstance(frequency, Quantity):
            frequency = frequency * ur("cm^-1")

        frequency.ito("Hz")
        self.meta.laser_frequency = frequency

        if self._use_time:
            spacing = 1.0 / frequency
            spacing.ito("picoseconds")

            self.increment = spacing.m
            self.offset = 0
            self._units = ur.picoseconds
            self.title = "time"

        else:
            frequency.ito("cm^-1")
            spacing = 1.0 / frequency
            spacing.ito("mm")

            self.increment = spacing.m
            self.offset = -self.increment * self._zpd
            self._units = ur.mm
            self.title = "optical path difference"
Example #2
def test_slicing_with_quantities(ds1):
    da = ds1.copy()

    da00 = da[1000.0 * ur("cm^-1"), 0]
    assert da00.shape == (1, 1, 3)
    assert da00.coordset["x"] == da00.coordset[0]
    assert da00.coordset["x"] == da.coordset[0]

    with pytest.raises(ValueError):
        _ = da[1000.0 * ur.K, 0]  # wrong units
Example #3
def _read_srs(*args, **kwargs):
    dataset, filename = args
    frombytes = kwargs.get("frombytes", False)

    return_bg = kwargs.get("return_bg", False)

    if frombytes:
        # in this case, filename is actually a byte content
        fid = io.BytesIO(filename)  # pragma: no cover
    else:
        fid = open(filename, "rb")

    # determine whether the srs is reprocessed. At pos=292 (hex:124) a difference
    # appears between original and reprocessed series
    fid.seek(292)
    key = _fromfile(fid, dtype="uint8", count=16)[0]
    if key == 39:  # (hex: 27)
        is_reprocessed = False
    elif key == 15:  # (hex: 0F)
        is_reprocessed = True
    else:
        # if key == 72 (hex: 48), this could be a TGA file
        raise NotImplementedError("srs file type not recognized (expected a rapid-scan series)")

    """ At pos=304 (hex:130) is the position of the '02' key for series. Herte we don't use it.
    Instead, we use the following sequence :
    b'\x02\x00\x00\x00\x18\x00\x00\x00\x00\x00\x48\x43\x00\x50\x43\x47'
    which appears 3 times in rapid-scan srs. They are used to assert the srs file is rapid_scan
    and to locate headers and data:
    - The 1st one is located 152 bytes after the series header position
    - The 2nd one is located 152 bytes before the background header position and
       56 bytes before either the background data / or the background title and infos
       followed by the background data
    - The 3rd one is located 64 bytes before the series data (spectre/ifg names and
    intensities"""

    sub = b"\x02\x00\x00\x00\x18\x00\x00\x00\x00\x00\x48\x43\x00\x50\x43\x47"

    # find the 3 starting indexes of sub. we will use the 1st (-> series info),
    # the 2nd (-> background) and the 3rd (-> data)
    fid.seek(0)
    bytestring = fid.read()
    start = 0
    index = []
    while start != -1:
        i = bytestring.find(sub, start + 1)
        index.append(i)
        start = i
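    # the loop appends the -1 returned by the last unsuccessful find(); it is dropped below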
    index = np.array(index[:-1])

    if len(index) != 3:
        raise NotImplementedError("Only implemented for rapidscan")

    index += [-152, -152, 60]

    # read series data, except if the user asks for the background
    if not return_bg:
        info = _read_header(fid, index[0])
        # container for names and data
        names = []
        data = np.zeros((info["ny"], info["nx"]))

        # now read the spectra/interferogram names and data
        # the first one....
        pos = index[2]
        names.append(_readbtext(fid, pos, 256))
        pos += 84
        fid.seek(pos)
        data[0, :] = _fromfile(fid, dtype="float32", count=info["nx"])[:]
        pos += info["nx"] * 4
        # ... and the remaining ones:
        for i in np.arange(info["ny"])[1:]:
            pos += 16
            names.append(_readbtext(fid, pos, 256))
            pos += 84
            fid.seek(pos)
            data[i, :] = _fromfile(fid, dtype="float32", count=info["nx"])[:]
            pos += info["nx"] * 4

        # now get series history
        if not is_reprocessed:
            history = info["history"]
        else:
            # In reprocessed series the updated "DATA PROCESSING HISTORY" is located right after
            # the following 16 byte sequence:
            sub = b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"
            pos = bytestring.find(sub) + 16
            history = _readbtext(fid, pos, None)

    # read the background if the user asked for it.
    if return_bg:

        # First get background info
        info = _read_header(fid, index[1])

        if "background_name" not in info.keys():
            # it is a short header
            fid.seek(index[1] + 208)
            data = _fromfile(fid, dtype="float32", count=info["nx"])
        else:
            # longer header; in such a case the header indicates a spectrum,
            # but the data are those of an ifg... More examples are needed before this can be handled.
            return None

    # Create NDDataset Object for the series
    if not return_bg:
        dataset = NDDataset(data)
    else:
        dataset = NDDataset(np.expand_dims(data, axis=0))

    # in case part of the spectra/ifg has been blanked:
    dataset.mask = np.isnan(dataset.data)

    dataset.units = info["units"]
    dataset.title = info["title"]
    dataset.origin = "omnic"

    # now add coordinates
    spacing = (info["lastx"] - info["firstx"]) / (info["nx"] - 1)
    _x = LinearCoord(
        offset=info["firstx"],
        increment=spacing,
        size=info["nx"],
        title=info["xtitle"],
        units=info["xunits"],
    )

    # specific infos for series data
    if not return_bg:
        dataset.name = info["name"]
        _y = Coord(
            np.around(np.linspace(info["firsty"], info["lasty"], info["ny"]), 3),
            title="Time",
            units="minute",
            labels=names,
        )

    else:
        _y = Coord()

    dataset.set_coordset(y=_y, x=_x)

    # Set origin, description and history
    dataset.origin = "omnic"
    dataset.description = kwargs.get("description", "Dataset from omnic srs file.")

    if "history" in locals():
        dataset.history.append(
            "Omnic 'DATA PROCESSING HISTORY' :\n"
            "--------------------------------\n" + history
        )
    dataset.history.append(
        str(datetime.now(timezone.utc)) + ": imported from srs file " + str(filename)
    )

    dataset.meta.laser_frequency = info["reference_frequency"] * ur("cm^-1")
    dataset.meta.collection_length = info["collection_length"] * ur("s")

    if dataset.x.units is None and dataset.x.title == "data points":
        # interferogram
        dataset.meta.interferogram = True
        dataset.meta.td = list(dataset.shape)
        dataset.x._zpd = int(np.argmax(dataset)[-1])  # zero path difference
        dataset.x.set_laser_frequency()
        # True to have a time axis, else it will be optical path difference
        dataset.x._use_time_axis = False

        # uncomment below to load the last datafield, which has the same dimension as the time axis.
        # Its purpose is not known; related to Gram-Schmidt?

        # pos = _nextline(pos)
        # found = False
        # while not found:
        #     pos += 16
        #     f.seek(pos)
        #     key = _fromfile(f, dtype='uint8', count=1)
        #     if key == 1:
        #         pos += 4
        #         f.seek(pos)
        #         X = _fromfile(f, dtype='float32', count=info['ny'])
        #         found = True
        #
        # X = NDDataset(X)
        # _x = Coord(np.around(np.linspace(0, info['ny']-1, info['ny']), 0),
        #            title='time',
        #            units='minutes')
        # X.set_coordset(x=_x)
        # X.name = '?'
        # X.title = '?'
        # X.description = 'unknown'
        # X.history = str(datetime.now(timezone.utc)) + ':imported from srs

    fid.close()

    return dataset
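This private reader is normally reached through SpectroChemPy's public read API. A hypothetical usage sketch (wrapper name `read_srs` assumed; the `return_bg` keyword comes from the code above):

import spectrochempy as scp

series = scp.read_srs("rapid_scan.srs")                      # the full series as an NDDataset
background = scp.read_srs("rapid_scan.srs", return_bg=True)  # the background spectrum only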
Example #4
def _read_spa(*args, **kwargs):
    dataset, filename = args
    content = kwargs.get("content", False)

    if content:
        fid = io.BytesIO(content)
    else:
        fid = open(filename, "rb")

    return_ifg = kwargs.get("return_ifg", None)

    # Read name:
    # The name starts at position hex 1e = decimal 30. Its max length
    # is 256 bytes. It is the original filename under which the spectrum has
    # been saved: it won't match with the actual filename if a subsequent
    # renaming has been done in the OS.
    spa_name = _readbtext(fid, 30, 256)

    # The acquisition date (GMT) is at hex 128 = decimal 296.
    # Seconds since 31/12/1899, 00:00
    fid.seek(296)
    timestamp = _fromfile(fid, dtype="uint32", count=1)
    acqdate = datetime(1899, 12, 31, 0, 0, tzinfo=timezone.utc) + timedelta(
        seconds=int(timestamp)
    )
    acquisitiondate = acqdate

    # Transform back to timestamp for storage in the Coord object
    # use datetime.fromtimestamp(d, timezone.utc) to transform back to a datetime object
    timestamp = acqdate.timestamp()

    # From hex 130 = decimal 304, the spectrum is described
    # by a block of lines starting with "key values",
    # for instance hex[02 6a 6b 69 1b 03 82] -> dec[02 106 107 105 27 03 130]
    # Each of these lines provides positions of data and metadata in the file:
    #
    #     key: hex 02, dec  02: position of spectral header (=> nx,
    #                                 firstx, lastx, nscans, nbkgscans)
    #     key: hex 03, dec  03: intensity position
    #     key: hex 04, dec  04: user text position (custom info, can be present
    #                           several times. The text length is five bytes later)
    #     key: hex 1B, dec  27: position of History text, The text length
    #                           is five bytes later
    #     key: hex 53, dec  83: probably not a position, present when 'Retrieved from library'
    #     key: hex 64, dec 100: ?
    #     key: hex 66  dec 102: sample interferogram
    #     key: hex 67  dec 103: background interferogram
    #     key: hex 69, dec 105: ?
    #     key: hex 6a, dec 106: ?
    #     key: hex 80, dec 128: ?
    #     key: hex 82, dec 130: position of 'Experiment Information', The text length
    #                           is five bytes later. The block gives Experiment filename (at +10)
    #                           Experiment title (+90), custom text (+254), accessory name (+413)
    #     key: hex 92, dec 146: position of 'custom infos', The text length
    #                           is five bytes later.
    #
    # The line preceding the block starts with '01' or '0A'.
    # The lines after the block generally start with '00', except in a few cases where
    # they start with '01'. In such cases, the '53' key is also present
    # (before the '1B').

    # scan "key values"
    pos = 304
    spa_comments = []  # several custom comments can be present
    while "continue":
        fid.seek(pos)
        key = _fromfile(fid, dtype="uint8", count=1)

        # print(key, end=' ; ')

        if key == 2:
            # read the position of the header
            fid.seek(pos + 2)
            pos_header = _fromfile(fid, dtype="uint32", count=1)
            info = _read_header(fid, pos_header)

        elif key == 3 and return_ifg is None:
            intensities = _getintensities(fid, pos)

        elif key == 4:
            fid.seek(pos + 2)
            comments_pos = _fromfile(fid, "uint32", 1)
            fid.seek(pos + 6)
            comments_len = _fromfile(fid, "uint32", 1)
            fid.seek(comments_pos)
            spa_comments.append(fid.read(comments_len).decode("latin-1", "replace"))

        elif key == 27:
            fid.seek(pos + 2)
            history_pos = _fromfile(fid, "uint32", 1)
            fid.seek(pos + 6)
            history_len = _fromfile(fid, "uint32", 1)
            spa_history = _readbtext(fid, history_pos, history_len)

        elif key == 102 and return_ifg == "sample":
            s_ifg_intensities = _getintensities(fid, pos)

        elif key == 103 and return_ifg == "background":
            b_ifg_intensities = _getintensities(fid, pos)

        elif key in (0, 1):
            break

        pos += 16

    fid.close()

    if (return_ifg == "sample" and "s_ifg_intensities" not in locals()) or (
        return_ifg == "background" and "b_ifg_intensities" not in locals()
    ):
        info_("No interferogram found, read_spa returns None")
        return None
    elif return_ifg == "sample":
        intensities = s_ifg_intensities
    elif return_ifg == "background":
        intensities = b_ifg_intensities
    # load intensity into the NDDataset
    dataset.data = np.array(intensities[np.newaxis], dtype="float32")

    if return_ifg == "background":
        title = "sample acquisition timestamp (GMT)"  # bckg acquisition date is not known for the moment...
    else:
        title = "acquisition timestamp (GMT)"  # no ambiguity here

    _y = Coord(
        [timestamp],
        title=title,
        units="s",
        labels=([acquisitiondate], [filename]),
    )

    # useful when a part of the spectrum/ifg has been blanked:
    dataset.mask = np.isnan(dataset.data)

    if return_ifg is None:
        default_description = f"# Omnic name: {spa_name}\n# Filename: {filename.name}"
        dataset.units = info["units"]
        dataset.title = info["title"]

        # now add coordinates
        nx = info["nx"]
        firstx = info["firstx"]
        lastx = info["lastx"]
        xunit = info["xunits"]
        xtitle = info["xtitle"]

        spacing = (lastx - firstx) / (nx - 1)

        _x = LinearCoord(
            offset=firstx, increment=spacing, size=nx, title=xtitle, units=xunit
        )

    else:  # interferogram
        if return_ifg == "sample":
            default_description = (
                f"# Omnic name: {spa_name} : sample IFG\n # Filename: {filename.name}"
            )
        else:
            default_description = f"# Omnic name: {spa_name} : background IFG\n # Filename: {filename.name}"
        spa_name += ": Sample IFG" if return_ifg == "sample" else ": Background IFG"
        dataset.units = "V"
        dataset.title = "detector signal"
        _x = LinearCoord(
            offset=0,
            increment=1,
            size=len(intensities),
            title="data points",
            units=None,
        )

    dataset.set_coordset(y=_y, x=_x)
    dataset.name = spa_name  # to be consistent with omnic behaviour
    dataset.filename = str(filename)

    # Set origin, description, history, date
    # Omnic spg files don't have a specific "origin" field stating the origin of the data

    dataset.description = kwargs.get("description", default_description) + "\n"
    if len(spa_comments) > 1:
        dataset.description += "# Comments from Omnic:\n"
        for comment in spa_comments:
            dataset.description += comment + "\n---------------------\n"

    dataset.history = str(datetime.now(timezone.utc)) + ":imported from spa file(s)"

    if "spa_history" in locals():
        if len("spa_history".strip(" ")) > 0:
            dataset.history = (
                "Data processing history from Omnic :\n------------------------------------\n"
                + spa_history
            )

    dataset._date = datetime.now(timezone.utc)

    dataset.meta.collection_length = info["collection_length"] / 100 * ur("s")
    dataset.meta.optical_velocity = info["optical_velocity"]
    dataset.meta.laser_frequency = info["reference_frequency"] * ur("cm^-1")

    if dataset.x.units is None and dataset.x.title == "data points":
        # interferogram
        dataset.meta.interferogram = True
        dataset.meta.td = list(dataset.shape)
        dataset.x._zpd = int(np.argmax(dataset)[-1])
        dataset.x.set_laser_frequency()
        # True to have a time axis, else it will be optical path difference
        dataset.x._use_time_axis = False

    return dataset
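As for the srs reader, a hypothetical usage sketch through the assumed public wrapper (the `return_ifg` keyword comes from the code above):

import spectrochempy as scp

spectrum = scp.read_spa("sample.spa")                                 # the processed spectrum
sample_ifg = scp.read_spa("sample.spa", return_ifg="sample")          # the raw sample interferogram
background_ifg = scp.read_spa("sample.spa", return_ifg="background")  # the background interferogram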
Example #5
def _read_topspin(*args, **kwargs):
    debug_("Bruker TOPSPIN file reading")
    dataset, path = args
    #    content = kwargs.get('content', None)

    # is it a processed dataset (1r, 2rr ...)?
    processed = bool(path.match("pdata/*/*"))

    # ------------------------------------------------------------------------
    # start reading ....
    # ------------------------------------------------------------------------

    parents = path.parents

    # Get data and acquisition parameters

    if not processed:
        # a fid or a ser has been selected
        f_expno = parents[0]
        expno = f_expno.name
        procno = kwargs.get("procno", "1")
        f_procno = f_expno / "pdata" / procno
        f_name = parents[1]

    else:
        # a processed spectrum has been selected (1r, ...)
        f_procno = parents[0]
        procno = f_procno.name
        f_expno = parents[2]
        expno = f_expno.name
        f_name = parents[3]

    acqus_files = _get_files(f_expno, "acqu")
    procs_files = _get_files(f_procno, "proc")

    if not processed:

        dic, data = read_fid(f_expno,
                             acqus_files=acqus_files,
                             procs_files=procs_files)

        # apply a -90 phase shift to be compatible with topspin
        data = data * np.exp(-1j * np.pi / 2.0)

        # Handle the case when the reshaping was not correct:
        # for example, this happens when the number
        # of accumulated rows is incomplete
        if path.name in ["ser"] and data.ndim == 1:
            # we must reshape using the acqu parameters
            td1 = dic["acqu2"]["TD"]
            try:
                data = data.reshape(td1, -1)
            except ValueError:
                try:
                    td = dic["acqu"]["TD"] // 2
                    data = data.reshape(-1, td)
                except ValueError:
                    raise KeyError("Inconsistency between TD's and data size")

            # reduce to td
            ntd = dic["acqus"]["TD"] // 2
            data = data[..., :ntd]

        # Eliminate the digital filter
        if kwargs.get("remove_digital_filter",
                      True) and dic["acqus"]["DECIM"] > 1:
            data = _remove_digital_filter(dic, data)

    else:

        dic, datalist = read_pdata(
            f_procno,
            acqus_files=acqus_files,
            procs_files=procs_files,
            all_components=True,
        )
        if isinstance(datalist, list):
            if datalist[0].ndim == 2:
                data, dataRI, dataIR, dataII = datalist
                # make quaternion
                shape = data.shape
                data = as_quat_array(
                    list(
                        zip(
                            data.flatten(),
                            dataRI.flatten(),
                            dataIR.flatten(),
                            dataII.flatten(),
                        )))
                data = data.reshape(shape)

            elif datalist[0].ndim == 1:
                # make complex
                data, dataI = datalist
                data = data + dataI * 1.0j

            else:
                return None
        else:
            data = datalist

    # ........................................................................................................
    # we now make some rearrangement of the dic to have something more user-friendly
    # we assume that all experiments have similar (important) parameters so that the experiments are compatible

    meta = Meta()  # This is the parameter dictionary
    datatype = path.name.upper() if not processed else f"{data.ndim}D"

    keys = sorted(dic.keys())

    # we need the ndim of the data
    parmode = int(dic["acqus"].get("PARMODE", data.ndim - 1))
    if parmode + 1 != data.ndim:
        raise KeyError(
            f"The NMR data were not read properly as the PARMODE+1 parameter ({parmode + 1}) doesn't fit"
            f" the actual number of dimensions ({data.ndim})")

    # read the acqu and proc
    valid_keys = list(zip(*nmr_valid_meta))[0]
    keys_units = dict(nmr_valid_meta)

    for item in keys:

        if item[:4] in ["acqu", "proc"]:
            dim = parmode
            if len(item) > 4 and item[4] in ["2", "3"]:
                dim = parmode + 1 - int(item[4])

            for key in sorted(dic[item]):

                if key.startswith("_") or key.lower() not in valid_keys:
                    continue

                value = dic[item][key]
                units = ur(keys_units[key.lower()]) if keys_units[
                    key.lower()] else None

                if units is not None:
                    if isinstance(value, (float, int)):
                        value = value * units  # make a quantity
                    elif isinstance(value, list) and isinstance(
                            value[0], (float, int)):
                        value = np.array(value) * units

                if key.lower() not in meta:
                    meta[key.lower()] = [None] * data.ndim

                try:
                    meta[key.lower()][dim] = value
                except Exception:
                    pass

        else:

            meta[item.lower()] = dic[item]

    # Warning: from now on, all parameter keys are lowercase.

    # correct some initial values

    meta.encoding = [0] * (parmode + 1)
    meta.iscomplex = [False] * (parmode + 1)

    if not processed:
        meta.isfreq = [False]
        meta.encoding[-1] = AQ_mod[meta.aq_mod[-1]]
        meta.iscomplex[-1] = meta.aq_mod[-1] > 0

    if datatype in ["SER"]:
        meta.isfreq.insert(0, False)

        if meta.fnmode[-2] == 0:
            # For historical reasons,
            # MC2 is interpreted when the acquisition status
            # parameter FnMODE has the value undefined, i.e. 0
            if meta.mc2 is not None:
                meta.fnmode[-2] = meta.mc2[-2] + 1

        meta.encoding[-2] = FnMODE[meta.fnmode[-2]]
        meta.iscomplex[-2] = meta.fnmode[-2] > 1

        if parmode == 2:
            meta.isfreq.insert(0, False)
            if meta.fnmode[-3] == 0 and meta.mc2 is not None:
                meta.fnmode[-3] = meta.mc2[-3] + 1
            meta.encoding[-3] = FnMODE[meta.fnmode[-3]]
            meta.iscomplex[-3] = meta.fnmode[-3] > 1

    # correct TD so that it is the number of complex points, not the number of data points
    # (not for the last dimension, which is already correct)
    meta.tdeff = meta.td[:]
    meta.td = list(data.shape)

    for axis in range(parmode + 1):
        if meta.iscomplex[axis]:
            if axis != parmode:  # already done for last axis
                meta.td[axis] = meta.td[axis] // 2
            meta.tdeff[axis] = meta.tdeff[axis] // 2

    meta.sw_h = [(meta.sw[axis].m * meta.sfo1[axis] * 1e-6).to("Hz")
                 for axis in range(parmode + 1)]

    if processed:
        meta.si = [si for si in data.shape]
        meta.isfreq = [True] * (parmode + 1)  # at least we assume this
        meta.phc0 = [0] * data.ndim

    # this transformation is to make data coherent with bruker processing
    if meta.iscomplex[-1]:
        data = np.conj(data * np.exp(np.pi * 1j / 2.0))

    # normalise amplitudes to ns=1 and rg=1
    def _norm(dat):
        meta.ns = meta.get(
            "ns",
            [1] * data.ndim)  # sometimes these parameters are not present
        meta.rg = meta.get("rg", [1.0] * data.ndim)
        fac = float(meta.ns[-1]) * float(meta.rg[-1])
        meta.rgold = [meta.rg[-1]]
        meta.rg[-1] = 1.0
        meta.nsold = [meta.ns[-1]]  # store the old value of NS
        meta.ns[-1] = 1
        dat /= fac
        return dat

    data = _norm(data)

    # add some additional information in meta
    meta.expno = [int(expno)]

    # and the metadata (and make them readonly)
    meta.datatype = datatype
    meta.pathname = str(path)

    # add two parameters needed for phasing
    meta.pivot = [0] * data.ndim
    meta.exptc = [0] * data.ndim

    # make the corresponding axis
    # debug_('Create coords...')
    coords = []
    axe_range = list(range(parmode + 1))

    for axis in axe_range:
        if not meta.isfreq[axis]:
            # the axis is in time units
            dw = (1.0 / meta.sw_h[axis]).to("us")
            # coordpoints = np.arange(meta.td[axis])
            # coord = Coord(coordpoints * dw,
            #             title=f"F{axis + 1} acquisition time")  # TODO: use AQSEQ for >2D data
            coord = LinearCoord(
                offset=0.0,
                increment=dw,
                units="us",
                size=meta.td[axis],
                title=f"F{axis + 1} acquisition time",
            )
            coord.meta.larmor = meta.sfo1[axis]
            coords.append(coord)
        else:
            size = meta.si[axis]
            sizem = max(size - 1, 1)
            deltaf = -meta.sw_h[axis] / sizem
            first = meta.sfo1[axis] - meta.sf[axis] - deltaf * sizem / 2.0

            # coord = Coord(np.arange(size) * deltaf + first)
            coord = LinearCoord(offset=first, increment=deltaf, size=size)
            coord.meta.larmor = meta.sfo1[
                axis]  # needed for ppm transformation
            coord.ito("ppm")
            if meta.nuc1 is not None:
                nuc1 = meta.nuc1[axis]
                regex = r"([^a-zA-Z]+)([a-zA-Z]+)"
                m = re.match(regex, nuc1)
                mass = m[1]
                name = m[2]
                nucleus = "^{" + mass + "}" + name
            else:
                nucleus = ""
            coord.title = rf"$\delta\ {nucleus}$"
            coords.append(coord)

    dataset.data = data

    for axis, cplex in enumerate(meta.iscomplex[::-1]):
        if cplex and axis > 0:
            dataset.set_quaternion(inplace=True)

    dataset.meta.update(meta)
    dataset.meta.readonly = True
    dataset.set_coordset(*tuple(coords))

    dataset.title = "intensity"
    dataset.origin = "topspin"
    dataset.name = f"{f_name.name} expno:{expno} procno:{procno} ({datatype})"
    dataset.filename = f_name

    return dataset
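A hypothetical usage sketch for this reader (public wrapper name `read_topspin` assumed; the path layouts and the `procno` keyword come from the code above):

import spectrochempy as scp

# raw fid of expno 1 (acqus/procs parameters found relative to this path)
fid = scp.read_topspin("nmrdata/1/fid", procno="1")
# processed 1D spectrum of expno 1, procno 1
spectrum = scp.read_topspin("nmrdata/1/pdata/1/1r")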
Example #6
# %%
ir.x.show_datapoints = False
_ = ir.plot(xlim=(-0.04, 0.04))

# %% [markdown]
# Note that the `x` scale of the interferogram has been calculated using the laser frequency indicated in the original
# omnic file. It is stored in the `meta` attribute of the NDDataset:

# %%
print(ir.meta.laser_frequency)

# %% [markdown]
# If absent, it can be set using the `set_laser_frequency()` method, e.g.:

# %%
ir.x.set_laser_frequency(15798.26 * ur("cm^-1"))

# %% [markdown]
# Now we can perform the Fourier transform. By default, no zero-filling level is applied prior to the Fourier
# transform for FTIR. To add some level of zero-filling, use the `zf` method.

# %%
ird = ir.dc()
ird = ird.zf(size=2 * ird.size)
irt = ird.fft()

_ = irt.plot(xlim=(3999, 400))

# %% [markdown]
# A `Happ-Genzel` (Hamming window) apodization can also be applied prior to the
# Fourier transformation in order to reduce the narrow H2O bands.
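# %% [markdown]
# A minimal sketch of that step, assuming the dataset exposes a `hamming()` apodization method
# analogous to the `dc()` and `zf()` calls above (method name assumed):

# %%
irdh = ird.hamming()  # Happ-Genzel / Hamming apodization (assumed API)
irth = irdh.fft()
_ = irth.plot(xlim=(3999, 400))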
Example #7
def test_coordset_set(coord0, coord1, coord2):
    coords = CoordSet(coord2, [coord0, coord0.copy()], coord1)
    assert (
        str(coords) == repr(coords) ==
        "CoordSet: [x:time-on-stream, y:[_1:wavenumber, _2:wavenumber], z:temperature]"
    )

    coords.set_titles("time", "dddd", "celsius")
    assert (str(coords) ==
            "CoordSet: [x:time, y:[_1:wavenumber, _2:wavenumber], z:celsius]")

    coords.set_titles(x="time", z="celsius", y_1="length")
    assert (str(coords) == repr(coords) ==
            "CoordSet: [x:time, y:[_1:length, _2:wavenumber], z:celsius]")

    coords.set_titles("t", ("l", "g"), x="x")
    assert str(coords) == "CoordSet: [x:x, y:[_1:l, _2:g], z:celsius]"

    coords.set_titles(("t", ("l", "g")), z="z")
    assert str(coords) == "CoordSet: [x:t, y:[_1:l, _2:g], z:z]"

    coords.set_titles()  # nothing happens
    assert str(coords) == "CoordSet: [x:t, y:[_1:l, _2:g], z:z]"

    with pytest.raises(DimensionalityError):  # because units don't match
        coords.set_units(("km/s", ("s", "m")), z="radian")

    coords.set_units(("km/s", ("s", "m")), z="radian",
                     force=True)  # force change
    assert str(coords) == "CoordSet: [x:t, y:[_1:l, _2:wavelength], z:z]"
    assert coords.y_1.units == ur("s")

    # set item

    coords["z"] = coord2
    assert str(
        coords) == "CoordSet: [x:t, y:[_1:l, _2:wavelength], z:temperature]"

    coords["temperature"] = coord1
    assert str(
        coords) == "CoordSet: [x:t, y:[_1:l, _2:wavelength], z:time-on-stream]"

    coords["y_2"] = coord2
    assert str(
        coords
    ) == "CoordSet: [x:t, y:[_1:l, _2:temperature], z:time-on-stream]"

    coords["_1"] = coord2
    assert (
        str(coords) ==
        "CoordSet: [x:t, y:[_1:temperature, _2:temperature], z:time-on-stream]"
    )

    coords["t"] = coord2
    assert (
        str(coords) ==
        "CoordSet: [x:temperature, y:[_1:temperature, _2:temperature], z:time-on-stream]"
    )

    coord2.title = "zaza"
    coords["temperature"] = coord2
    assert (
        str(coords) ==
        "CoordSet: [x:zaza, y:[_1:temperature, _2:temperature], z:time-on-stream]"
    )

    coords["temperature"] = coord2
    assert (
        str(coords) ==
        "CoordSet: [x:zaza, y:[_1:zaza, _2:temperature], z:time-on-stream]")

    coords.set(coord1, coord0, coord2)
    assert str(coords) == "CoordSet: [x:zaza, y:wavenumber, z:time-on-stream]"

    coords.z = coord0
    assert str(coords) == "CoordSet: [x:zaza, y:wavenumber, z:wavenumber]"

    coords.zaza = coord0
    assert str(
        coords) == "CoordSet: [x:wavenumber, y:wavenumber, z:wavenumber]"

    coords.wavenumber = coord2
    assert str(coords) == "CoordSet: [x:zaza, y:wavenumber, z:wavenumber]"
Example #8
def test_models():

    model = scp.asymmetricvoigtmodel()
    assert model.args == ["ampl", "pos", "width", "ratio", "asym"]

    x = np.arange(1000)
    ampl = 1000.0
    width = 100
    ratio = 0
    asym = 1.5
    pos = 500

    max = 6.366197723675813

    array = model.f(x, ampl, pos, width, ratio, asym)
    assert array.shape == (1000, )
    assert_approx_equal(array[pos], max, significant=4)

    array = model.f(x, 2.0 * ampl, pos, width, ratio, asym)  # ampl=2.
    assert_approx_equal(array[pos], max * 2.0, significant=4)

    # x array with units
    x1 = x * ur("cm")
    array = model.f(x1, ampl, pos, width, ratio, asym)
    assert_approx_equal(array[pos], max, significant=4)
    assert not hasattr(array, "units")

    # amplitude with units
    ampl = 1000.0 * ur("g")
    array = model.f(x1, ampl, pos, width, ratio, asym)
    assert hasattr(array, "units")
    assert array.units == ur("g")
    assert_approx_equal(array[pos].m, max, significant=4)

    # use keyword instead of positional parameters
    array = model.f(x1, ampl, pos, asym=asym, width=width, ratio=ratio)
    assert_approx_equal(array[pos].m, max, significant=4)

    # rescale some parameters
    array = model.f(x1,
                    width=1000.0 * ur("mm"),
                    ratio=ratio,
                    asym=asym,
                    ampl=ampl,
                    pos=pos)
    assert_approx_equal(array[pos].m, max, significant=4)

    # x is a Coord object
    x2 = scp.LinearCoord.arange(1000)
    width = 100.0
    array = model.f(x2, ampl, pos, width, ratio, asym)
    assert isinstance(array, scp.NDDataset)
    assert_approx_equal(array[pos].value.m, max, significant=4)
    assert array.units == ampl.units

    # x is a Coord object with units
    x3 = scp.LinearCoord.linspace(0.0,
                                  0.999,
                                  1000,
                                  units="m",
                                  title="distance")
    width = 100.0 * ur("mm")
    pos = 0.5
    array = model.f(x3, ampl, pos, width, ratio, asym)
    assert hasattr(array, "units")
    assert_approx_equal(array[500].m, max, significant=4)

    # do the same for various models
    kwargs = dict(
        ampl=1.0 * ur("g"),
        width=100.0 * ur("mm"),
        ratio=0.5,
        asym=2,
        pos=0.5,
        c_2=1.0,
    )

    for modelname, expected in [
        ("gaussianmodel", 0.9394292818892936),
        ("lorentzianmodel", 0.6366197723675814),
        ("voigtmodel", 0.8982186579508358),
        ("asymmetricvoigtmodel", 0.8982186579508358),
        ("polynomialbaseline", 0.0),
        ("sigmoidmodel", 50),
    ]:
        model = getattr(scp, modelname)()
        if modelname == "sigmoid":
            kwargs["width"] = 0.01
        array = model.f(x3, **kwargs)
        actual = array[pos].value
        if modelname != "sigmoid":
            actual = actual * 100
        assert_approx_equal(actual.m, expected, 4)