        raise NPKError("Axis %d should be complex" % axis, data=data)

#---------------------------------------------------------------------------
def ft_sim(data):
    """performs the fourier transform of a data-set acquired on a Bruker in simultaneous mode
    Processing is performed only along the F2 (F3) axis if in 2D (3D)

    (Bruker QSIM mode)
    """
    todo = data.dim
    data.revf().fft(axis=todo)
    return data

NPKData_plugin("ft_sim", ft_sim)

#---------------------------------------------------------------------------
def ft_seq(data):
    """performs the fourier transform of a data-set acquired on a Bruker in sequential mode
    Processing is performed only along the F2 (F3) axis if in 2D (3D)

    (Bruker QSEQ mode)
    """
    todo = data.dim
    data.revf().rfft(axis=todo)
    return data

NPKData_plugin("ft_seq", ft_seq)
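# usage sketch for the two transforms above; the importer (spike.File.BrukerNMR.Import_1D)
# and the dataset location are assumptions - adapt them to your own installation and data
if __name__ == '__main__':
    from spike.File.BrukerNMR import Import_1D
    d = Import_1D('/data/myexperiment/1')   # hypothetical Bruker experiment folder
    d.ft_sim()      # QSIM acquisition: revf() then complex FT along the acquisition axis
    d.display()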
                        Binv, y, eta, nbiter=nbiter, lamda=lamda, prec=precision, full_output=full_output)
        Ok = not np.isnan(x.sum())    # the current algo sometimes produces NaN values
        if not Ok:
            NaN_found += 1
            uncertainty *= 1.4
    npkd.set_buffer(x[:, 0])
    npkd.noise = eta
    if full_output:
        npkd.full_output = c
    if NaN_found > 0:
        print("%d NaN conditions encountered during PALMA processing" % NaN_found)
    return npkd

def test(npkd):
    print('Not implemented')

NPKData_plugin("palma", palma)
NPKData_plugin("do_palma", do_palma)
NPKData_plugin("prepare_palma", prepare_palma)
NPKData_plugin("calibdosy", dcalibdosy)
    if npkd.dim == 1:
        z = denoise1D(npkd.get_buffer(), nsigma * npkd.get_buffer().std(), wavelet=wavelet)
    elif npkd.dim == 2:
        z = denoise2D(npkd.get_buffer(), nsigma * npkd.get_buffer().std(), wavelet=wavelet)
    else:
        raise NPKError("not implemented")
    npkd.set_buffer(z)
    return npkd

if ok:
    NPKData_plugin("wavelet", wavelet)

class WaveLetTest(unittest.TestCase):
    """ - Testing Wavelet plugin - """
    def setUp(self):
        self.verbose = 1    # verbose > 0 switches messages on
    def announce(self):
        if self.verbose > 0:
            print("\n========", self.shortDescription(), '===============')
    def test_wave(self):
        """ - testing wavelet - """
        from spike.util.signal_tools import findnoiselevel
        from spike.NPKData import NPKData
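# usage sketch, assuming the wavelet dependency tested by the `ok` flag above is installed;
# the threshold is nsigma times the buffer's standard deviation, as in the code
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    d = NPKData(buffer=np.random.randn(1024))   # pure noise stands in for a real spectrum
    d.wavelet(nsigma=3.0)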
    if xlabel == "_def_":
        xlabel = npkd.axis2.currentunit
    if ylabel == "_def_":
        ylabel = npkd.axis1.currentunit
    if xlabel is not None:
        dbk['x_axis_label'] = xlabel
    if ylabel is not None:
        dbk['y_axis_label'] = ylabel
    dbk.update(dbkdic)
    xs, ys, col = get_contour_data(ax)
    if redraw:
        npkd.bokeh_fig['xs'] = xs
        npkd.bokeh_fig['ys'] = ys
    else:
        p = bk.figure(**dbk)
        dfig = {}
        dfig['xs'] = xs
        dfig['ys'] = ys
        dfig['color'] = col
        dfig.update(dfigdic)
        p.multi_line(**dfig)
        npkd.bokeh_fig = dfig
        npkd.bokeh_plot = p
    del fig, ax
    if show:
        bk.show(npkd.bokeh_plot)
    return npkd

NPKData_plugin("bokeh", bokeh_display)
        d.peaks.report(NbMaxPeaks=10)

    def test_center2d(self):
        M = np.zeros((20, 20))
        # add one peak at (F1, F2) = (5.3, 7.9) with widths (5.0, 1.3)
        for y in range(1, 10):
            for x in range(6, 11):
                #                  (y, x)             y0   x0   intens widthy widthx
                M[y, x] = center2d(np.array([y, x]), 5.3, 7.9, 20.0, 5.0, 1.3)
        #print(M[1:10, 6:11])
        self.assertAlmostEqual(M[2, 7], 5.87777515)
        d = _NPKData(buffer=np.maximum(M, 0.0))
        d.peaks = Peak2DList(source=d)
        # arguments:        Id, label, intens, posF1, posF2
        d.peaks.append(Peak2D(0, "0", 18.0, 5, 8))
        d.centroid(npoints_F1=5)
        self.assertAlmostEqual(d.peaks[0].posF1, 5.3)
        self.assertAlmostEqual(d.peaks[0].posF2, 7.9)
        self.assertAlmostEqual(d.peaks[0].intens, 20.0)
        self.assertAlmostEqual(d.peaks[0].widthF1, 5.0 * np.sqrt(2))
        self.assertAlmostEqual(d.peaks[0].widthF2, 1.3 * np.sqrt(2))

NPKData_plugin("pp", peakpick)
NPKData_plugin("peakpick", peakpick)
NPKData_plugin("centroid", centroid)
NPKData_plugin("report_peaks", report_peaks)
NPKData_plugin("display_peaks", display_peaks)
NPKData_plugin("peaks2d", peaks2d)
NPKData_plugin("pk2pandas", pk2pandas)
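# a short usage sketch of the chain registered above, on a synthetic 1D spectrum
# (same construction as the fit-plugin tests in this collection)
if __name__ == '__main__':
    t = np.linspace(0, 10, 1000)
    y = 100 * np.cos(100 * t) * np.exp(-t)     # one damped cosine
    d = _NPKData(buffer=np.fft.rfft(y).real)
    d.pp(threshold=1000)            # detect peaks above the intensity threshold
    d.centroid()                    # refine positions and widths
    d.peaks.report(NbMaxPeaks=10)   # textual report, limited to 10 peaks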
                                       window_size)
    else:
        raise NPKError("not implemented")
    return npkd

########################################################################
def sg2D(npkd, window_size, order, deriv=None):
    """applies a 2D Savitzky-Golay filter to the data

    window_size : int
        the length of the square window; must be an odd integer.
    order : int
        the order of the polynomial used in the filtering;
        must be less than `window_size` - 1.
    deriv : None, 'col', or 'row' ('both' mode does not work)
        the direction of the derivative to compute
        (default = None means smoothing only)

    can be applied to a 2D only.
    """
    import spike.Algo.savitzky_golay as sgm
    npkd.check2D()
    npkd.set_buffer(sgm.savitzky_golay2D(npkd.get_buffer(), window_size, order, derivative=deriv))
    return npkd

NPKData_plugin("sg", sg)
NPKData_plugin("sg2D", sg2D)
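# usage sketch; the keyword names for sg() are assumed to match sg2D() above
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    d = NPKData(buffer=np.random.randn(1000))   # noise stands in for a real 1D spectrum
    d.sg(window_size=11, order=4)               # Savitzky-Golay smoothing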
    for the second pass a lower rank can be used.
    """
    if npkd.dim == 1:
        if npkd.axis1.itype == 0:   # real case: build the analytical signal
            buff = as_cpx(_base_ifft(_base_rfft(npkd.buffer)))
        else:                       # complex case: use the buffer as is
            buff = npkd.get_buffer()
        sane_result = sane(buff, rank, orda=orda, trick=trick, iterations=iterations)  # performs denoising
        if npkd.axis1.itype == 0:   # real case: come back to real
            buff = _base_irfft(_base_fft(as_float(sane_result)))
            npkd.set_buffer(buff)
        else:                       # complex case: store as interleaved floats
            npkd.buffer = as_float(sane_result)
    elif npkd.dim == 2:
        todo = npkd.test_axis(axis)
        if todo == 2:
            for i in xrange(npkd.size1):
                r = npkd.row(i).sane(rank=rank, orda=orda, iterations=iterations)
                npkd.set_row(i, r)
        elif todo == 1:
            for i in xrange(npkd.size2):
                r = npkd.col(i).sane(rank=rank, orda=orda, iterations=iterations)
                npkd.set_col(i, r)
    elif npkd.dim == 3:
        raise Exception("not implemented yet")
    return npkd

NPKData_plugin("sane", sane_plugin)
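# usage sketch: sane denoising of a synthetic noisy complex FID
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    t = np.arange(2000) / 2000.0
    fid = (np.exp(2j * np.pi * 300 * t - 3 * t)
           + 0.3 * (np.random.randn(2000) + 1j * np.random.randn(2000)))
    d = NPKData(buffer=fid)
    d.sane(rank=10)     # rank should be a bit larger than the expected number of signals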
        # here, noise level is half of the largest amplitude.
        np.random.seed(12345)
        # noisy data
        nfid = fid0 + NOISE * np.random.randn(len(fid0)) + 1j * NOISE * np.random.randn(len(fid0))
        # generate sampling
        RATIO = 1. / 8
        sampling = gene_sampling(RATIO)
        # prepare
        f = NPKData(buffer=nfid[sampling])
        f.axis1.sampling = sampling
        # do it
        t0 = time.time()
        g = f.copy().pg_sane(iterations=20, rank=15)
        elaps = time.time() - t0
        SNR = -20 * np.log10(np.linalg.norm(g.get_buffer() - fid0) / np.linalg.norm(fid0))
        print("test_NUS_sampling2: elaps %.2f sec  SNR: %.1f dB  (should be larger than 30 dB)" % (elaps, SNR))
        self.assertTrue(SNR > 30.0)
        ax1 = plt.subplot(211)
        f.copy().apod_sin().zf(2).fft().display(title='spectrum original data with sampling noise', figure=ax1)
        ax2 = plt.subplot(212)
        g.copy().apod_sin().zf(2).fft().display(title='spectrum after pg_sane cleaning', figure=ax2)

NPKData_plugin("pg_sane", pg_sane)
""" set to zeros all points below nsigma times the noise level This allows the corresponding data-set, once stored to file, to be considerably more compressive. nsigma: float the ratio used, typically 1.0 to 3.0 (higher compression) nbseg: int the number of segments used for noise evaluation, see util.signal_tools.findnoiselevel axis: int the axis on which the noise is evaluated, default is fastest varying dimension """ todo = npkd.test_axis(axis) if npkd.dim == 1: noise = findnoiselevel(npkd.get_buffer(), nbseg=nbseg) npkd.zeroing(nsigma * noise) elif npkd.dim == 2: if todo == 2: for i in xrange(npkd.size1): npkd.set_row(i, npkd.row(i).fastclean(nsigma=nsigma, nbseg=nbseg)) elif todo == 1: for i in xrange(npkd.size2): npkd.set_col(i, npkd.col(i).fastclean(nsigma=nsigma, nbseg=nbseg)) else: raise NPKError("a faire") return npkd NPKData_plugin("fastclean", fastclean)
def lpext(npkd, final_size, lprank=10, algotype="burg"):
    """
    extends a 1D FID, or a 2D FID in F1, up to final_size,
    using lprank coefficients and algotype mode
    """
    if npkd.dim == 1:
        return lpext1d(npkd, final_size, lprank=lprank, algotype=algotype)
    elif npkd.dim == 2:
        return lpext2d(npkd, final_size, lprank=lprank, algotype=algotype)
    else:
        raise Exception("not implemented yet")

class LinpredicTests(unittest.TestCase):
    def setUp(self):
        self.verbose = 1    # verbose > 0 switches on messages
    def announce(self):
        if self.verbose > 0:
            print(self.shortDescription())
    def test_log(self):
        """testing log"""
        import math
        self.announce()
        x = 0.0
        y = math.log(1.0)
        self.assertAlmostEqual(x, y)

NPKData_plugin("lpext", lpext)
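# usage sketch: double the length of a clean truncated FID by linear prediction
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    t = np.arange(512) / 1000.0
    d = NPKData(buffer=np.exp(2j * np.pi * 150 * t - 3 * t))   # truncated complex FID
    d.lpext(1024, lprank=20)        # Burg algorithm by default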
"""computes a new set of parameters whil moving pivot from pvbef to pvaft""" # p (pbef) goes from 0 to 1 so, for a buffer of size, pivot position is p*size # in position x, phase correction is prop to (x/size - p) # ph = P0 + (x/size - p) P1 + (x/size - p)² P2 # ph = P0 + x/size P1 - p P1 + (x/size)² P2 + p² P2 - 2 x p/size P2 # ph = P0 - p P1 + p² P2 - 2 x p/size P2 + x/size P1 + (x/size)² P2 # and with a new pivot paft: # ph = P0' - paft P1' + paft² P2' - 2 x paft/size P2' + x/size P1' + (x/size)² P2' # P0' - paft P1' + paft² P2' + (x/size)(- 2 paft P2' + P1') + (x/size)² P2' # for the phase to be equal with paft we need that cst, x and x² terms to be equal, which means # so # => P2' = P2 # - 2 paft P2' + P1' = - 2 p P2 + P1 # => P1' = P1 - 2 p P2 + 2 paft P2 # = P1 + 2(paft-p)P2 # P0' - paft P1' + paft² P2' = P0 - p P1 + p² P2 # => P0' = P0 - p P1 + p² P2 + paft P1' - paft² P2' # = P0 - p P1 + (p²-paft²) P2 + paft P1' p2p = p2 p1p = p1 + 2*(pvaft-pvbef)*p2 p0p = 360*(p0/360 - pvbef*p1 + (pvbef**2 - pvaft**2)*p2 + pvaft*p1p) # then bring ph0 in [-180, 180] while p0p > 180: p0p -= 360 while p0p < -180: p0p += 360 return p0p, p1p, p2p, pvaft # NPKData_plugin("phase", phase) # supercede the regular phase correction code - not compatible with Test suite ! NPKData_plugin("phaseMS", phase)
#!/usr/bin/env python
# encoding: utf-8
"""Test procedure for plugins
"""
from __future__ import print_function

from spike.NPKData import NPKData_plugin

def fake(dd, title):
    "fake method"
    dd.test_title = title
    return dd

NPKData_plugin("fake", fake)
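# a minimal demonstration of the plugin mechanism itself: once NPKData_plugin()
# has run, the function above is available as a method on every dataset
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    d = NPKData(buffer=np.zeros(64))
    d.fake("hello")         # equivalent to fake(d, "hello")
    print(d.test_title)     # -> hello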
    pass
else:
    xrange = range

def rem_ridge(data):
    """
    removes an F1 ridge by evaluating a mean value over the last 10% of the data
    of each column of a 2D, and subtracting it from every row
    """
    data.check2D()
    deb = int(0.9 * data.size1)     # start and end of the evaluation zone
    fin = data.size1
    r = data.row(deb)
    for i in xrange(deb + 1, fin):  # compute the mean
        r.add(data.row(i))
    r.mult(-1.0 / (fin - deb))
    for i in xrange(data.size1):
        data.set_row(i, data.row(i).add(r))
    return data

# and keep the standard NPKData syntax
NPKData_plugin("rem_ridge", rem_ridge)
"""
rem_ridge() is now injected: from now on (in this running version)
data.rem_ridge() will perform a baseline ridge correction
"""
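# the same algorithm written directly in numpy, for reference:
# the mean of the last 10% of the rows is subtracted from every row
if __name__ == '__main__':
    import numpy as np
    a = np.random.randn(100, 50) + 3.0      # noise plus a constant ridge on every row
    deb = int(0.9 * a.shape[0])
    ridge = a[deb:, :].mean(axis=0)         # column-wise mean of the last 10% of the rows
    corrected = a - ridge
    print(abs(corrected.mean()) < 0.1)      # ridge essentially removed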
        cc = npkd.col(i)    # going column-wise is probably faster...
        d5[i - z2lo, :] = cc[z1lo:z1up + 1]    # taking a slice out of a npkdata returns a np.array
    zmax = np.amax(d5)
    zmin = np.amin(d5)      # some data-sets are negative
    xmin = zoom[2]
    xmax = zoom[3]
    ymin = zoom[0]
    ymax = zoom[1]
    mlab.figure(bgcolor=(1., 1., 1.), fgcolor=(0., 0., 0.))
    mlab.surf(d5, extent=[0, 1000, 0, 1000, 0, 1000], warp_scale='auto', colormap=colormap)
    ax = mlab.axes(x_axis_visibility=showaxes, y_axis_visibility=showaxes, z_axis_visibility=showaxes,
                   xlabel="F2 " + npkd.axis2.currentunit, ylabel="F1 " + npkd.axis1.currentunit,
                   zlabel='Intensity',
                   ranges=[xmin, xmax, ymin, ymax, zmin, zmax], nb_labels=5)
    ax.label_text_property.font_family = font
    ax.title_text_property.font_family = font
    ax.axes.font_factor = fontsize

if ok:
    NPKData_plugin("zoomwindow", zoom3D)
                P1step = -P1step
                break
        P0min = P0minnext
        P1min = P1minnext
        if debug:
            dd = d.copy()
            P0, P1 = phase_pivot(dd, P0min, P1min, pivot)
            print("*** P0 P1 :", P0, P1)
            color_sequence = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c',
                              '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5',
                              '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f',
                              '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5']
            dd.display(new_fig=False, label="%.0f %.0f" % (P0, P1),
                       color=color_sequence[neval % len(color_sequence)])
        P0step = P0step / 2.0
        P1step = P1step / 2.0
        if P0step < 5.0:
            bcorr = baselinecorr    # bcorr is expensive, so we apply it only at the end, if needed
            if debug:
                print('bcorr = True')
    (P0, P1) = phase_pivot(d, P0min, P1min, pivot)
    if debug:
        print("**FINAL** %.2f %.2f in %d evaluations" % (P0, P1, neval))
    d.axis1.P0 = P0
    d.axis1.P1 = P1
    return d

NPKData_plugin("apmin", apmin)
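# usage sketch, assuming apmin() runs with its default arguments;
# the synthetic FID below carries a deliberate zero-order phase error
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    t = np.arange(2048) / 2000.0
    fid = np.exp(2j * np.pi * 440 * t - 3 * t) * np.exp(0.7j)
    d = NPKData(buffer=fid).fft()
    d.apmin()       # optimized phases are stored on the axis
    print("P0: %.1f  P1: %.1f" % (d.axis1.P0, d.axis1.P1))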
    for mol, values in calib.items():
        mass, charge = values
        peptide = False
        try:
            nmass = iso.parse_peptide(mol).monoisotop(charge)
            peptide = True
        except:
            pass
        if not peptide:
            try:
                nmass = iso.parse_formula(mol).monoisotop(charge)
                peptide = False
            except:
                print('could not understand %s' % mol)
                continue
        if nmass < 10.0:
            continue
        if peptide and charge > 0:    # then add H+
            nmass += iso.parse_formula('H').monoisotop()
        diff.append(1E6 * abs(mass - nmass) / mass)
        calib[mol] = (nmass, charge)
    print('recalibrate  mean: %.3f ppm  max: %.3f ppm' % (sum(diff) / len(diff), max(diff)))
    return calib

# and plug the whole set into NPKData
NPKData_plugin("set_calib", set_calib)
NPKData_plugin("calib", calib)
NPKData_plugin("display_calib", display_calib)
        simple 1D correction
    spline:
        a cubic spline correction

    both linear and spline use an additional list of pivot points 'xpoints',
    used to calculate the baseline:
    if xpoints is absent, pivots are estimated automatically;
    if xpoints is an integer, it determines the number of computed pivots
        (default is 8 if xpoints is None);
    if xpoints is a list of integers, they will be used as pivots.

    if nsmooth > 0, the buffer is smoothed by a moving average
        over 2*nsmooth+1 positions around each pivot.
    if the dataset is complex, the xpoints are computed on the modulus
        spectrum, unless modulus is False.

    default is spline with automatic detection of 8 baseline points
    """
    if method == 'auto':
        return bcorr_auto(npkd)
    else:
        if xpoints is None or isinstance(xpoints, int):
            xpoints = autopoints(npkd, xpoints, modulus=modulus)
        if method == 'linear':
            return linear_interpolate(npkd, xpoints, nsmooth=nsmooth)
        elif method == 'spline':
            return spline_interpolate(npkd, xpoints, nsmooth=nsmooth)
        else:
            raise Exception("Wrong method in bcorr plugin")

NPKData_plugin("bcorr_lin", linear_interpolate)
NPKData_plugin("bcorr_spline", spline_interpolate)
NPKData_plugin("bcorr_auto", bcorr_auto)
NPKData_plugin("bcorr", bcorr)
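# usage sketch of the entry points registered above; xpoints are in index units
if __name__ == '__main__':
    import numpy as np
    from spike.NPKData import NPKData
    d = NPKData(buffer=np.random.randn(4096) + 5.0)      # noisy spectrum with a flat offset
    d.copy().bcorr()                                     # default: spline, 8 automatic pivots
    d.copy().bcorr(method='linear', xpoints=12)          # linear, 12 automatic pivots
    d.copy().bcorr(method='spline', xpoints=[100, 1000, 3000])   # explicit pivot list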
#!/usr/bin/env python
# encoding: utf-8
"""computes the diagonal of 2D-MS spectra

Created by DELSUC Marc-André on 2020-12-10.
"""
import numpy as np

from spike.NPKData import NPKData_plugin

def diagonal(self):
    """allows to extract the diagonal of a 2D FTMS spectrum"""
    self.check2D()
    ddiag = self.row(0)                                  # container
    diag = np.zeros(self.size2)                          # data
    high = int(self.axis2.mztoi(self.axis1.lowmass))     # borders, in index
    low = int(self.axis2.mztoi(self.axis1.highmass))
    i = np.arange(low, high)
    z = self.axis2.itomz(i)                              # convert indices to m/z
    iz = np.int_(np.round(self.axis1.mztoi(z)))
    jz = np.int_(np.round(self.axis2.mztoi(z)))
    diag[jz] = self[iz, jz]                              # and copy
    ddiag.set_buffer(diag)
    return ddiag

NPKData_plugin("diagonal", diagonal)
            integoff=0.3, integscale=0.5, color='red', label=False,
            labelxposition=1, labelyposition=None, regions=False,
            zoom=None, figure=None, curvedict=None, labeldict=None):
    npkd.integrals.display(integoff=integoff, integscale=integscale, color=color,
                           label=label, labelxposition=labelxposition,
                           labelyposition=labelyposition, regions=regions, zoom=zoom,
                           figure=figure, curvedict=curvedict, labeldict=labeldict)
    return npkd
display.__doc__ = Integrals.display.__doc__

NPKData_plugin("integrate", integrate)
NPKData_plugin("integral_calibrate", calibrate)
NPKData_plugin("display_integral", display)
                  (ih1 - inext1), (ih2 - inext2)), file=file)
            # print(here1, here2, here1_2, here2_2, inext1, ih1, inext2, ih2, file=F)
            here2_2 = next2
            here2 = (here2 + bsize2)
        here1_2 = next1
        here1 = (here1 + bsize1)
    return data

class BucketingTests(unittest.TestCase):
    def setUp(self):
        self.verbose = 1    # verbose > 0 switches on messages
    def announce(self):
        if self.verbose > 0:
            print(self.shortDescription())
    def _test_log(self):
        """testing log"""
        import math
        self.announce()
        x = 0.0
        y = math.log(1.0)
        self.assertAlmostEqual(x, y)

NPKData_plugin("bucket1d", bucket1d)
NPKData_plugin("bucket2d", bucket2d)
        # create a 1D spectrum
        t = np.linspace(0, 10, 1000)
        y = np.zeros_like(t)
        A = (100, 100, 100)
        W = (100, 110, 115)
        TAU = (0.3, 1, 3)
        for a, w, tau in zip(A, W, TAU):
            y += a * np.cos(w * t) * np.exp(-t * tau)
        Y = np.fft.rfft(y).real
        Y -= Y[0]
        # load and peak-pick
        d = spike.NPKData._NPKData(buffer=Y)
        d.pp(threshold=1000)
        # check
        self.assertEqual(list(d.peaks.pos), [159.0, 175.0, 183.0])
        d.fit()
        if scipy.__version__ > '0.17.0':
            # the first fit is not complete, because of constraints on the widths (third peak)
            self.assertAlmostEqual(d.peaks.chi2, 121.72613405, places=2)
            d.fit()
            self.assertAlmostEqual(d.peaks.chi2, 15.0445981291, places=2)   # the second one is complete
        # another possibility is centroid
        d.pp(threshold=1000)
        d.centroid()
        d.fit(zoom=(140, 200))
        self.assertAlmostEqual(d.peaks.chi2, 12.4304236435, places=1)   # lower, because of the zoom
        self.assertAlmostEqual(sum(list(d.peaks.pos)), 517.74817237246634, places=2)

NPKData_plugin("simulate", simulate)
NPKData_plugin("fit", fit)
NPKData_plugin("display_fit", display_fit)
Created by DELSUC Marc-André on February 2019
Copyright (c) 2019 IGBMC. All rights reserved.
"""
import numpy as np

from spike.NPKData import as_float, NPKData_plugin

#-------------------------------------------------------------------------------
def gaussenh(npkd, width, enhancement=2.0, axis=0):
    """
    apply a gaussian enhancement, width is in Hz
    enhancement is the strength of the effect
    multiplies the FID by gauss(width) * exp(enhancement*width)
    """
    todo = npkd.test_axis(axis)
    it = npkd.axes(todo).itype
    sw = npkd.axes(todo).specwidth
    size = npkd.axes(todo).size
    if it == 1:    # means complex
        size = size // 2
    baseax = width * np.arange(size) / sw
    e = np.exp(enhancement * baseax)    # rising exponential: undoes the natural decay
    e *= np.exp(-(baseax)**2)           # gaussian envelope: keeps the tail under control
    if it == 1:
        e = as_float((1 + 1.0j) * e)    # check NPKData.py to see how apodisations are handled.
    return npkd.apod_apply(axis, e)

NPKData_plugin("gaussenh", gaussenh)
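# a standalone numpy sketch of the window built above, to visualize the effect;
# the sw, width and enhancement values are arbitrary
if __name__ == '__main__':
    sw, size = 5000.0, 1024             # spectral width (Hz) and number of points
    width, enhancement = 20.0, 2.0
    baseax = width * np.arange(size) / sw
    e = np.exp(enhancement * baseax) * np.exp(-baseax**2)
    # e rises first (resolution enhancement), then the gaussian envelope takes over
    print("max of the window: %.2f at point %d" % (e.max(), e.argmax()))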