Example #1
from spike.util.signal_tools import findnoiselevel  # noise estimator, module named in the docstring

def fastclean(npkd, nsigma=2.0, nbseg=20, axis=0):
    """
    set to zero all points below nsigma times the noise level.
    This allows the corresponding data-set, once stored to file, to be considerably more compressible.
    
    nsigma: float
        the ratio used, typically 1.0 to 3.0 (higher values give stronger compression)
    nbseg: int
        the number of segments used for noise evaluation, see util.signal_tools.findnoiselevel
    axis: int
        the axis on which the noise is evaluated, default is fastest varying dimension
    """
    todo = npkd.test_axis(axis)
    if npkd.dim == 1:
        noise = findnoiselevel(npkd.get_buffer(), nbseg=nbseg)
        npkd.zeroing(nsigma * noise)
    elif npkd.dim == 2:
        if todo == 2:
            for i in range(npkd.size1):
                npkd.set_row(i,
                             npkd.row(i).fastclean(nsigma=nsigma, nbseg=nbseg))
        elif todo == 1:
            for i in range(npkd.size2):
                npkd.set_col(i,
                             npkd.col(i).fastclean(nsigma=nsigma, nbseg=nbseg))
    else:
        raise NPKError("not implemented")
    return npkd
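A minimal self-contained sketch of the same thresholded zeroing on a bare numpy array; the segment-based noise estimate below is only an illustration, not spike's findnoiselevel:

import numpy as np

def fastclean_array(buf, nsigma=2.0, nbseg=20):
    """zero all points below nsigma times an estimated noise level"""
    segs = np.array_split(buf, nbseg)          # cut the data into segments
    noise = min(seg.std() for seg in segs)     # the quietest segment approximates the noise
    out = buf.copy()
    out[np.abs(out) < nsigma * noise] = 0.0    # zero everything below the threshold
    return out

x = np.random.randn(1000)
x[::100] += 50.0                               # a few strong "peaks"
print(np.count_nonzero(fastclean_array(x)))    # most points are now exactly zero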
Example #2
def sg(npkd, window_size, order, deriv=0, axis=0):
    """applies Savitzky-Golay of order filter to data
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)
    axis: int
        the axis on which the filter is to be applied, default is fastest varying dimension
    """
    import spike.Algo.savitzky_golay as sgm
    todo = npkd.test_axis(axis)
    m = sgm.sgolay_coef(window_size, order, deriv=deriv)
    if npkd.dim == 1:
        npkd.set_buffer(sgm.sgolay_comp(npkd.get_buffer(), m, window_size))
    elif npkd.dim == 2:
        if todo == 2:
            for i in range(npkd.size1):
                npkd.buffer[i, :] = sgm.sgolay_comp(npkd.buffer[i, :], m,
                                                    window_size)
        elif todo == 1:
            for i in range(npkd.size2):
                npkd.buffer[:, i] = sgm.sgolay_comp(npkd.buffer[:, i], m,
                                                    window_size)
    else:
        raise NPKError("not implemented")
    return npkd
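Outside spike, scipy ships the same Savitzky-Golay filter as scipy.signal.savgol_filter; a short sketch for comparison, assuming only numpy and scipy:

import numpy as np
from scipy.signal import savgol_filter

t = np.linspace(0, 2 * np.pi, 201)
noisy = np.sin(t) + 0.1 * np.random.randn(t.size)
smooth = savgol_filter(noisy, window_length=21, polyorder=3)           # plain smoothing
deriv1 = savgol_filter(noisy, window_length=21, polyorder=3, deriv=1)  # smoothed 1st derivative
print(np.abs(smooth - np.sin(t)).max())        # residual error after smoothing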
Example #3
def lpext2d(npkd, final_size, lprank=10, algotype="burg"):
    """
    extends a 2D FID in F1 up to final_size, using lprank coefficients, and algotype mode
    """
    npkd.check2D()
    init_size = npkd.size1
    if lprank > init_size / 2 or lprank < 2:
        raise NPKError("error with lprank", data=npkd)
    if algotype == "burg":
        if npkd.axis1.itype == 1:
            method = burgc
        else:
            method = burgr
    else:
        raise Exception(
            "Only burg algorithm implemented in lpext for the moment")
    k = npkd.axis1.itype + 1  # 1 or 2
    npkd.chsize(sz1=k * final_size)  # first extend
    for i, r in enumerate(npkd.xcol()):
        buf = r.get_buffer()[0:init_size]  # truncate
        coeffs = method(lprank, buf)
        predicted = predict(buf, coeffs, final_size)  # and predict
        r.set_buffer(predicted)
        npkd.set_col(i, r)
    return npkd
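For intuition, here is a self-contained linear-prediction extension of a bare numpy array; it fits the AR coefficients by plain least squares rather than the Burg recursion used above:

import numpy as np

def lp_extend(fid, final_size, lprank=10):
    """extend a decaying signal by linear prediction (least-squares AR fit)"""
    n = len(fid)
    # model: each sample is a linear combination of the lprank previous ones
    A = np.array([fid[i:i + lprank] for i in range(n - lprank)])
    b = fid[lprank:]
    coeffs, *_ = np.linalg.lstsq(A, b, rcond=None)
    out = np.empty(final_size, dtype=fid.dtype)
    out[:n] = fid
    for i in range(n, final_size):
        out[i] = out[i - lprank:i] @ coeffs    # recursive forward prediction
    return out

fid = np.exp(-np.arange(256) / 64.0) * np.cos(0.3 * np.arange(256))
print(lp_extend(fid, 512).shape)               # (512,)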
Example #4
def ft_sh_tppi(data, axis="F1"):
    """ States-Haberkorn / TPPI F1 Fourier Transform """
    if data.dim == 1:
        raise NPKError("Not implemented in 1D", data=data)
    todo = data.test_axis(axis)
    data.fft(axis=todo)
    return data
Example #5
def wavelet(npkd, nsigma=1.0, wavelet='db3'):
    """
    Performs the wavelet denoising of a 1D or 2D spectrum.
    
    nsigma  the threshold is nsigma times the estimate noise level,
        default 1.0 - corresponds to a relatively strong denoising
    wavelet the wavelet basis used, default 'db3' (Daubechies 3)
        check pywt.wavelist() for the list of available wavelets
    
    eg:
    d.wavelet(nsigma=0.5)  # d is cleaned after execution
    
    ref: Donoho DL (1995) De-noising by soft-thresholding. IEEE Trans Inf Theory 41:613–621.
    
    Based on the PyWavelet library
    """
    noise = findnoiselevel(npkd.get_buffer())
    if npkd.dim == 1:
        z = denoise1D(npkd.get_buffer(), nsigma * noise, wavelet=wavelet)
    elif npkd.dim == 2:
        z = denoise2D(npkd.get_buffer(), nsigma * noise, wavelet=wavelet)
    else:
        raise NPKError("not implemented")
    npkd.set_buffer(z)
    return npkd
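A self-contained sketch of the same soft-thresholding idea (Donoho's recipe) written directly against the PyWavelets API named in the docstring; the exact thresholding rule inside spike's denoise1D/denoise2D may differ:

import numpy as np
import pywt

def wavelet_denoise(x, nsigma=1.0, wavelet='db3'):
    """soft-threshold the detail coefficients, keep the approximation"""
    coeffs = pywt.wavedec(x, wavelet)
    sigma = np.median(np.abs(coeffs[-1])) / 0.6745         # robust noise estimate (MAD)
    thresh = nsigma * sigma * np.sqrt(2 * np.log(len(x)))  # universal threshold
    coeffs = [coeffs[0]] + [pywt.threshold(c, thresh, mode='soft') for c in coeffs[1:]]
    return pywt.waverec(coeffs, wavelet)[:len(x)]

t = np.linspace(0, 1, 1024)
clean = np.sin(8 * np.pi * t)
noisy = clean + 0.3 * np.random.randn(t.size)
print(np.std(wavelet_denoise(noisy) - clean) < np.std(noisy - clean))  # True: noise reduced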
Example #6
def ft_tppi(data, axis="F1"):
    """TPPI F1 Fourier transform"""
    if data.dim == 1:
        raise NPKError("Not implemented in 1D", data=data)
    todo = data.test_axis(axis)
    data.revf(axis=todo).rfft(axis=todo)
    return data
Example #7
def ftF2(data):
    """emulates Bruker ft of a 2D in F1 depending on FnMode"""
    if data.dim != 2:
        raise NPKError("Command available in 2D only", data=data)
    if data.axis2.itype == 1:
        data.ft_sim()
    else:
        data.ft_seq()
    return data
Example #8
def centroid2d(npkd, npoints_F1=3, npoints_F2=3):
    """
    from peak lists determined with peak()
    realize a centroid fit of the peak summit and width,
    computes Full width at half maximum
    updates in data peak list
    
    TODO : update uncertainties
    """
    from scipy.optimize import curve_fit
    npkd.check2D()
    nF1 = npoints_F1
    nF2 = npoints_F2
    noff1 = (int(nF1) - 1) // 2
    noff2 = (int(nF2) - 1) // 2
    if (2 * noff1 + 1 != nF1) or (nF1 < 3) or (2 * noff2 + 1 != nF2) or (nF2 < 3):
        raise NPKError("npoints must be odd and >= 3", data=npkd)
    for pk in npkd.peaks:
        st1 = int(round(pk.posF1 - noff1))
        end1 = int(round(pk.posF1 + noff1 + 1))
        st2 = int(round(pk.posF2 - noff2))
        end2 = int(round(pk.posF2 + noff2 + 1))
        yxdata = np.array([(y, x) for y in range(st1, end1)
                           for x in range(st2, end2)]).ravel()
        # yx = np.array([[y,x] for y in range(1,4) for x in range(10,12)]).ravel()
        # => array([ 1, 10,  1, 11,  2, 10,  2, 11,  3, 10,  3, 11])  will be decoded by center2d
        zdata = npkd.get_buffer()[st1:end1, st2:end2].ravel()
        try:
            # yx,           yo, xo, intens, widthy, widthx
            popt, pcov = curve_fit(
                center2d,
                yxdata,
                zdata,
                p0=[pk.posF1, pk.posF2, pk.intens, 1.0, 1.0])  # fit
        except RuntimeError:
            print("peak %d (label %s) centroid could not be fitted" %
                  (pk.Id, pk.label))
            continue  # leave this peak unchanged
        pk.posF1 = popt[0]
        pk.posF2 = popt[1]
        pk.intens = popt[2]
        pk.widthF1 = np.sqrt(2.0) * popt[3]
        pk.widthF2 = np.sqrt(2.0) * popt[4]
        errors = np.sqrt(np.diag(pcov))
        pk.posF1_err = errors[0]
        pk.posF2_err = errors[1]
        pk.intens_err = errors[2]
        pk.widthF1_err = np.sqrt(2.0) * errors[3]
        pk.widthF2_err = np.sqrt(2.0) * errors[4]
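A self-contained analogue of the 2D fit: spike's center2d model is not reproduced in this snippet, so this sketch assumes a hypothetical Gaussian stand-in gauss2d with the same interleaved (y, x) data layout produced by ravel() above:

import numpy as np
from scipy.optimize import curve_fit

def gauss2d(yx, y0, x0, amp, wy, wx):
    """Gaussian summit; yx holds interleaved (y, x) pairs, matching the ravel() layout"""
    y, x = yx[::2], yx[1::2]
    return amp * np.exp(-((y - y0) / wy) ** 2 - ((x - x0) / wx) ** 2)

yxdata = np.array([(y, x) for y in range(8, 13) for x in range(18, 23)], dtype=float).ravel()
zdata = gauss2d(yxdata, 10.2, 20.7, 100.0, 1.5, 2.0) + 0.5 * np.random.randn(25)
popt, pcov = curve_fit(gauss2d, yxdata, zdata, p0=[10.0, 20.0, 90.0, 1.0, 1.0])
print(popt[:2])                  # refined summit, close to (10.2, 20.7)
print(np.sqrt(np.diag(pcov)))    # 1-sigma uncertainties on all five parameters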
Example #9
def _spline_interpolate(buff, xpoints, kind=3, nsmooth=0):
    """compute and returns a spline function 
        we are using splrep and splev instead of interp1d because interp1d needs to have 0 and last point
        it doesn't extend.
    """
    if len(xpoints) == 2:
        return _linear_interpolate(buff, xpoints)
    elif len(xpoints) > 2:
        xpoints.sort()
        y = get_ypoints(buff, xpoints, nsmooth=nsmooth)
        tck = interpolate.splrep(xpoints, y, k=kind)

        def f(x):
            return interpolate.splev(x, tck, der=0, ext=0)

        return f
    else:  # fewer than two points: interpolation is impossible
        raise NPKError("too few points for spline interpolation")
Example #10
def lpext1d(npkd, final_size, lprank=10, algotype="burg"):
    """
    extends the current FID up to final_size, using lprank coefficients, and algotype mode
    """
    npkd.check1D()
    if lprank > npkd.size1 / 2 or lprank < 2:
        raise NPKError("error with lprank", data=npkd)
    if algotype == "burg":
        if npkd.axis1.itype == 1:
            method = burgc
        else:
            method = burgr
    else:
        raise Exception(
            "Only burg algorithm implemented in lpext for the moment")
    coeffs = method(lprank, npkd.get_buffer())
    predicted = predict(npkd.buffer, coeffs, final_size)
    npkd.set_buffer(predicted)
    npkd.adapt_size()
    return npkd
Example #11
def peakpick(npkd, threshold=None, zoom=None, autothresh=3.0, verbose=True):
    """
    performs a peak picking of the current experiment
    threshold is the level above which peaks are picked
        None (default) means that autothresh*(noise level of dataset) will be used - using d.robust_stats() as proxy for noise-level
    zoom defines the region on which detection is made
        zoom is in currentunit (same syntax as in display)
        None means the whole data
    """
    if threshold is None:
        mu, sigma = npkd.robust_stats()
        threshold = autothresh * sigma
        if mu > 0:
            threshold += mu
    if npkd.dim == 1:
        listpkF1, listint = peaks1d(npkd, threshold=threshold, zoom=zoom)
        #     Id, label, intens, pos
        pkl = Peak1DList( ( Peak1D(i, str(i), intens, pos) \
            for i, pos, intens in zip( range(len(listint)), list(listpkF1), list(listint) ) ), \
                    threshold=threshold, source=npkd )
        # use pos as labels -
        pkl.pos2label()
        npkd.peaks = pkl
    elif npkd.dim == 2:
        listpkF1, listpkF2, listint = peaks2d(npkd,
                                              threshold=threshold,
                                              zoom=zoom)
        #     Id, label, intens, posF1, posF2
        pkl = Peak2DList( ( Peak2D(i, str(i), intens, posF1, posF2) \
            for i, posF1, posF2, intens in zip( range(len(listpkF1)), listpkF1, listpkF2, listint ) ), \
                                    threshold=threshold, source=npkd )
        npkd.peaks = pkl
    else:
        raise NPKError("Not implemented of %sD experiment" % npkd.dim,
                       data=npkd)
    if verbose:
        print('PP Threshold:', threshold)
        print('PP: %d peaks detected' % len(npkd.peaks))
    return npkd
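A toy 1D picker showing the same auto-threshold logic on a plain numpy array; the median/MAD pair below stands in for spike's robust_stats, and the 3-point summit test stands in for peaks1d:

import numpy as np

def pick1d(buf, threshold):
    """indices of local maxima above threshold (3-point summit test)"""
    above = buf[1:-1] > threshold
    summit = (buf[1:-1] >= buf[:-2]) & (buf[1:-1] > buf[2:])
    return np.nonzero(above & summit)[0] + 1

x = 0.1 * np.random.randn(200)
x[[40, 120]] += 10.0                                # two strong peaks
mad = np.median(np.abs(x - np.median(x)))
mu, sigma = np.median(x), 1.4826 * mad              # robust mean / std estimates
print(pick1d(x, mu + 3.0 * sigma))                  # close to [40, 120]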
Example #12
def _interpolate(func, npkd, xpoints, axis='F2', nsmooth=0):
    """"
    compute and applies a linear function as a baseline correction
    xpoints are the location of pivot points
    """
    if npkd.dim == 1:
        f = func(npkd.buffer, xpoints, nsmooth=nsmooth)
        x = np.arange(npkd.size1)
        npkd.buffer -= f(x)
    elif npkd.dim == 2:
        if npkd.test_axis(axis) == 2:
            x = np.arange(npkd.size2)
            for i in range(npkd.size1):
                f = func(npkd.buffer[i, :], xpoints, nsmooth=nsmooth)
                npkd.buffer[i, :] -= f(x)
        elif npkd.test_axis(axis) == 1:
            x = np.arange(npkd.size1)
            for i in range(npkd.size2):
                f = func(npkd.buffer[:, i], xpoints, nsmooth=nsmooth)
                npkd.buffer[:, i] -= f(x)
    else:
        raise NPKError("not implemented")
    return npkd
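A numpy-only sketch of the linear variant: subtract a piecewise-linear baseline anchored at pivot points (the nsmooth averaging around each pivot is omitted here):

import numpy as np

def baseline_linear(buf, xpoints):
    """subtract a piecewise-linear baseline anchored at the pivot values"""
    xpoints = sorted(xpoints)
    ypoints = [buf[i] for i in xpoints]
    x = np.arange(len(buf))
    return buf - np.interp(x, xpoints, ypoints)

buf = 0.01 * np.arange(512) + 0.1 * np.random.randn(512)  # drifting baseline
buf[250:260] += 5.0                                       # one peak on top
flat = baseline_linear(buf, [0, 100, 200, 400, 511])
print(abs(flat[400:].mean()) < abs(buf[400:].mean()))     # drift removed: True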
Example #13
def centroid1d(npkd, npoints=3, reset_label=True):
    """
    from peak lists determined with peak()
    realize a centroid fit of the peak summit and width,
    will use npoints values around center  (npoints has to be odd)
    computes Full width at half maximum
    updates in data peak list
    reset_label when True (default) reset the labels of FTMS datasets
    TODO : update uncertainties
    """
    from scipy.optimize import curve_fit
    npkd.check1D()
    noff = (int(npoints) - 1) // 2
    if (2 * noff + 1 != npoints) or (npoints < 3):
        raise NPKError("npoints must be odd and >= 3", data=npkd)
    buff = npkd.get_buffer().real
    for pk in npkd.peaks:
        xdata = np.arange(int(round(pk.pos - noff)),
                          int(round(pk.pos + noff + 1)))
        ydata = buff[xdata]
        try:
            popt, pcov = curve_fit(center,
                                   xdata,
                                   ydata,
                                   p0=[pk.pos, pk.intens, 1.0])  # fit
        except RuntimeError:
            print("peak %d (id %s) centroid could not be fitted" %
                  (pk.Id, pk.label))
            continue  # leave this peak unchanged
        pk.pos = popt[0]
        pk.intens = popt[1]
        pk.width = np.sqrt(2.0) * popt[2]
        errors = np.sqrt(np.diag(pcov))
        pk.pos_err = errors[0]
        pk.intens_err = errors[1]
        pk.width_err = np.sqrt(2.0) * errors[2]
    if reset_label:
        npkd.peaks.pos2label()
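A stand-alone version of the centroid refinement; spike's center model is not shown in this snippet, so a parabolic summit is assumed as a stand-in:

import numpy as np
from scipy.optimize import curve_fit

def center(x, xo, intens, width):
    """parabolic summit model, a stand-in for spike's own center()"""
    return intens * (1.0 - ((x - xo) / width) ** 2)

xdata = np.arange(48, 53, dtype=float)                # npoints=5 window around the summit
ydata = center(xdata, 50.3, 100.0, 4.0) + np.random.randn(5)
popt, pcov = curve_fit(center, xdata, ydata, p0=[50.0, 90.0, 3.0])
print(popt[0], np.sqrt(np.diag(pcov))[0])             # refined position and its uncertainty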
Example #14
def ft_phase_modu(data, axis="F1"):
    """F1-Fourier transform for phase-modulated 2D"""
    if data.dim != 2:
        raise NPKError("implemented only in 2D", data=data)
    data.flip().revf("F1").fft("F1").flop().reverse("F1")
    return data
Example #15
def check_cpx(data, axis):
    if data.axes(axis).itype != 1:
        raise NPKError("Axis %d should be complex" % axis, data=data)
Example #16
def check_real(data, axis):
    if data.axes(axis).itype != 0:
        raise NPKError("Axis %d should be real" % axis, data=data)
def bucket2d(data, zoom=((0.5, 9.5), (0.5, 9.5)), bsize=(0.1, 0.1), file=None):
    """
 This tool permits to realize a bucket integration from the current 2D data-set.
 You will have to determine  (all spectral values are in ppm)
   - zoom (F1limits, F2limits),  : the starting and ending ppm of the integration zone in the spectrum
   - bsize (F1,F2): the sizes of the bucket
   - file: the filename to which the result is written


 For a better bucket integration, you should be careful that :
   - the bucket size is not too small, size is better than number !
   - the baseline correction has been carefully done
   - the spectral window is correctly determined to encompass the meaningfull spectral zone.

    """
    data.check2D()
    start1, end1 = zoom[0]
    start2, end2 = zoom[1]
    bsize1, bsize2 = bsize
    if (bsize1 <= 0 or bsize2 <= 0):
        raise NPKError("Negative bucket size not allowed")
    if (start1 - bsize1 / 2 < data.axis1.itop(data.size1)):
        raise NPKError("Starting point outside spectrum")
    if (start2 - bsize2 / 2 < data.axis2.itop(data.size2)):
        raise NPKError("Starting point outside spectrum")
    if (end1 + bsize1 / 2 > data.axis1.itop(0)):
        raise NPKError("Ending point outside spectrum")
    if (end2 + bsize2 / 2 > data.axis2.itop(0)):
        raise NPKError("Ending point outside spectrum")
    if ((end1 - start1) / bsize1 < 4):
        raise NPKError("Integration zone too small or Bucket too large")
    if ((end2 - start2) / bsize2 < 4):
        raise NPKError("Integration zone too small or Bucket too large")
    ppm_per_point1 = (data.axis1.specwidth / data.axis1.frequency / data.size1)
    ppm_per_point2 = (data.axis2.specwidth / data.axis2.frequency / data.size2)
    if (bsize1 < 2 * ppm_per_point1):
        raise NPKError("Bucket size smaller than digital resolution !")
    if (bsize2 < 2 * ppm_per_point2):
        raise NPKError("Bucket size smaller than digital resolution !")

    dcopy = data.copy()  # work now on a real version of the data
    dcopy.real(axis=2)
    dcopy.real(axis=1)

    s = "# %i rectangular buckets with a mean size of %.2f x %.2f data points" % \
        ( round((end1-start1+bsize1)/bsize1)*round((end2-start2+bsize2)/bsize2), \
        bsize1/ppm_per_point1, bsize2/ppm_per_point2)
    print(s, file=file)
    if file is not None:  # also echo to the terminal
        print(s)
    print(
        "centerF1, centerF2, bucket, max, min, std, bucket_size_F1, bucket_size_F2",
        file=file)
    here1 = min(start1, end1)
    here1_2 = (here1 - bsize1 / 2)
    there1 = max(start1, end1)
    while (here1_2 < there1):
        ih1 = int(round(dcopy.axis1.ptoi(here1_2)))
        next1 = (here1_2 + bsize1)
        inext1 = int(round(dcopy.axis1.ptoi(next1)))
        if ih1 < 0 or inext1 < 0:
            break
        here2 = min(start2, end2)
        here2_2 = (here2 - bsize2 / 2)
        there2 = max(start2, end2)
        while (here2_2 < there2):
            ih2 = int(round(dcopy.axis2.ptoi(here2_2)))
            next2 = (here2_2 + bsize2)
            inext2 = int(round(dcopy.axis2.ptoi(next2)))
            if ih2 < 0 or inext2 < 0:
                break
            integ = dcopy.buffer[inext1:ih1, inext2:ih2].sum()
            area = ((ih1 - inext1) * bsize1) * ((ih2 - inext2) * bsize2)
            try:
                maxv = dcopy.buffer[inext1:ih1, inext2:ih2].max()
                minv = dcopy.buffer[inext1:ih1, inext2:ih2].min()
            except ValueError:
                maxv = np.nan  # on an empty slice, sum and std return nan but max/min raise
                minv = np.nan
            stdv = dcopy.buffer[inext1:ih1, inext2:ih2].std()
            print("%.3f, %.3f, %.1f, %.1f, %.1f, %.1f, %d, %d" %
                  (here1, here2, integ / area, maxv, minv, stdv,
                   (ih1 - inext1), (ih2 - inext2)),
                  file=file)
            here2_2 = next2
            here2 = (here2 + bsize2)
        here1_2 = next1
        here1 = (here1 + bsize1)

    return data
def bucket1d(data, zoom=(0.5, 9.5), bsize=0.04, file=None):
    """
 This tool permits to realize a bucket integration from the current 1D data-set.
 You will have to determine  (all spectral values are in ppm)
   - zoom (low,high),  : the starting and ending ppm of the integration zone in the spectrum
   - bsize: the size of the bucket
   - file: the filename to which the result is written


 For a better bucket integration, you should be careful that :
   - the bucket size is not too small, size is better than number !
   - the baseline correction has been carefully done
   - the spectral window is correctly determined to encompass the meaningfull spectral zone.

    """
    data.check1D()
    start, end = zoom
    if (bsize <= 0): raise NPKError("Negative bucket size not allowed")
    if (start - bsize / 2 < data.axis1.itop(data.size1)):
        raise NPKError("Starting point outside spectrum")
    if (end + bsize / 2 > data.axis1.itop(0)):
        raise NPKError("Ending point outside spectrum")
    if ((end - start) / bsize < 10):
        raise NPKError("Integration zone too small or Bucket too large")
    ppm_per_point = (data.axis1.specwidth / data.axis1.frequency / data.size1)
    if (bsize < 2 * ppm_per_point):
        raise NPKError("Bucket size smaller than digital resolution !")

    dcopy = data.copy()  # work now on a real version of the data
    dcopy.real(axis=1)

    s = "# %i buckets with a mean size of %.2f data points" % \
        ( round((end-start+bsize)/bsize), bsize/ppm_per_point)
    print(s, file=file)
    if file is not None:  # also echo to the terminal
        print(s)
    print("center, bucket, max, min, std, bucket_size", file=file)
    there = max(start, end)     # end of the bucket region
    here = min(start, end)      # running center of the bucket - initialized to the beginning
    here2 = (here - bsize / 2)  # running start of the bucket
    while (here2 < there):
        ih = round(dcopy.axis1.ptoi(here2))     # index of the running start of the bucket
        next = (here2 + bsize)                  # running end of the bucket
        inext = round(dcopy.axis1.ptoi(next))   # index of the running end of the bucket
        if ih < 0 or inext < 0:
            break
        integ = dcopy.buffer[inext:ih].sum()
        try:
            maxv = dcopy.buffer[inext:ih].max()
            minv = dcopy.buffer[inext:ih].min()
        except ValueError:
            maxv = np.nan  # on an empty slice, sum and std return nan but max/min raise
            minv = np.nan
        stdv = dcopy.buffer[inext:ih].std()
        print("%.3f, %.1f, %.1f, %.1f, %.1f, %d" %
              (here, integ / ((ih - inext) * bsize), maxv, minv, stdv,
               (ih - inext)),
              file=file)
        here2 = next
        here = (here + bsize)
    return data
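Finally, a numpy-only sketch of the 1D bucketing itself, indexing directly on a ppm axis instead of going through spike's ptoi() point conversion:

import numpy as np

def bucket1d_array(x, y, lo, hi, bsize):
    """average y over contiguous buckets of width bsize along the x axis"""
    edges = np.arange(lo, hi + bsize, bsize)
    centers = 0.5 * (edges[:-1] + edges[1:])
    idx = np.digitize(x, edges) - 1                   # bucket index of every point
    means = np.array([y[idx == i].mean() if np.any(idx == i) else np.nan
                      for i in range(len(centers))])
    return centers, means

ppm = np.linspace(0.5, 9.5, 4096)
spec = np.exp(-((ppm - 4.7) / 0.05) ** 2)             # one line at 4.7 ppm
centers, means = bucket1d_array(ppm, spec, 0.5, 9.5, 0.04)
print(centers[np.nanargmax(means)])                   # close to 4.7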