Example #1
import rampy as rp


def _smoothSignal(spectra, index=-1, method="flat", inplace=False,
                  **kwargs):
    # Operate on the object itself or on a copy, depending on `inplace`.
    if inplace:
        spectra_c = spectra
    else:
        spectra_c = spectra.copy()

    if index == -1:  # all signals
        targets = spectra_c.index
    elif isinstance(index, tuple):  # several signals
        targets = index
    elif isinstance(index, int):  # a single signal
        targets = (index,)
    else:
        targets = ()

    for i in targets:
        new_sig = rp.smooth(spectra_c.wavenumbers,
                            spectra_c.intensity[i],
                            method, **kwargs)
        # Window-based methods may return a slightly longer array, so
        # truncate the result to the length of the wavenumber axis.
        spectra_c.intensity[i] = new_sig[:len(spectra_c.wavenumbers)]

    if not inplace:
        return spectra_c
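
A minimal usage sketch for the helper above, assuming spectra is a container object exposing index, wavenumbers, a copy() method and an indexable intensity attribute (its class is not shown in this snippet):

# hypothetical call: smooth signals 0 and 2 with a Savitzky-Golay filter;
# with inplace=False the original object is untouched and a copy is returned
smoothed = _smoothSignal(spectra, index=(0, 2), method="savgol",
                         window_length=7, polyorder=3)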
Example #2
    def smooth(self, y, method="whittaker", **kwargs):
        """Smooth the signals of the map using the rampy.smooth function.

        Parameters
        ----------
        y : ndarray
            The intensities to smooth. For instance, to smooth the
            background-corrected intensities, pass self.I_corrected.
        method : str
            Method for smoothing the signal; choose between savgol
            (Savitzky-Golay), GCVSmoothedNSpline, MSESmoothedNSpline,
            DOFSmoothedNSpline, whittaker, flat, hanning, hamming,
            bartlett and blackman.

        kwargs
        ------
        window_length : int
            The length of the filter window (i.e. the number of
            coefficients); must be a positive odd integer.
        polyorder : int
            The order of the polynomial used to fit the samples; must be
            less than window_length.
        Lambda : float
            Smoothing parameter of the Whittaker filter described in
            Eilers (2003); the higher, the smoother the fit.
        d : int
            d parameter of the Whittaker filter, see Eilers (2003).
        ese_y : ndarray
            Errors associated with y (for the gcvspline algorithms).

        Returns
        -------
        self.y_smoothed : ndarray
            The smoothed signals for the map.
        """
        self.y_smoothed = np.copy(self.I)
        # smooth each spectrum (one column per map position) individually
        for i in range(len(self.X)):
            y_ = rp.smooth(self.w, y[:, i], method=method, **kwargs)
            self.y_smoothed[:, i] = y_.ravel()
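
For context, the loop above can be reproduced outside the map class; a standalone sketch assuming a (n_points, n_spectra) intensity array and a shared wavenumber axis:

import numpy as np
import rampy as rp

w = np.linspace(100.0, 1000.0, 500)   # shared wavenumber axis
I = np.random.random((500, 20))       # 20 spectra stored as columns
I_smoothed = np.copy(I)
for i in range(I.shape[1]):
    # same pattern as in the method above, here with the Whittaker filter
    I_smoothed[:, i] = rp.smooth(w, I[:, i], method="whittaker", Lambda=1e3).ravel()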
Example #3
def centroid(x, y, smoothing=False, **kwargs):
    """Calculate the centroid(s) of the y signal(s),

    as np.sum(y / np.sum(y, axis=0) * x, axis=0).

    Parameters
    ==========
    x : Numpy array, m values by n samples
        x values
    y : Numpy array, m values by n samples
        y values

    Options
    =======
    smoothing : bool
        If True, the signals are smoothed with the arguments provided as
        kwargs. The default method is Whittaker smoothing; see the
        rampy.smooth function for the smoothing options and arguments.

    Returns
    =======
    centroid : Numpy array, n samples
        signal centroid(s)
    """
    y_ = y.copy()

    if smoothing:
        # smooth each sample (column) before computing its centroid
        for i in range(x.shape[1]):
            y_[:, i] = rampy.smooth(x[:, i], y[:, i], **kwargs)

    return np.sum(y_ / np.sum(y_, axis=0) * x, axis=0)
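
A quick sanity check for centroid, assuming numpy is available: the centroid of a single symmetric Gaussian should fall at its centre.

import numpy as np

# one Gaussian centred at 500, shaped (m values, 1 sample)
x = np.linspace(0.0, 1000.0, 1000).reshape(-1, 1)
y = np.exp(-0.5 * ((x - 500.0) / 40.0) ** 2)
print(centroid(x, y))  # approximately [500.]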
Example #4
    def test_smooth(self):

        nb_points = 200
        x = np.linspace(50, 600, nb_points)

        # gaussian peaks
        p1 = 20.0 * np.exp(-np.log(2) * ((x - 150.0) / 15.0)**2)
        p2 = 100.0 * np.exp(-np.log(2) * ((x - 250.0) / 20.0)**2)
        p3 = 50.0 * np.exp(-np.log(2) * ((x - 450.0) / 50.0)**2)
        p4 = 20.0 * np.exp(-np.log(2) * ((x - 350.0) / 300)**2)
        p5 = 30.0 * np.exp(-np.log(2) * ((x - 460.0) / 5.0)**2)

        # background: a large gaussian + linear
        bkg = 60.0 * np.exp(-np.log(2) * ((x - 250.0) / 200.0)**2) + 0.1 * x

        # noise
        noise = 2.0 * np.random.normal(size=nb_points)

        # observation
        y = p1 + p2 + p3 + p4 + p5 + noise + bkg

        # calculating the smoothed signals (the spline-based methods need
        # the optional gcvspline package and are left commented out)
        #y_smo_1 = rp.smooth(x,y,method="GCVSmoothedNSpline")
        #y_smo_2 = rp.smooth(x,y,method="DOFSmoothedNSpline")
        #y_smo_3 = rp.smooth(x,y,method="MSESmoothedNSpline")
        y_smo_4 = rp.smooth(x,
                            y,
                            method="savgol",
                            window_length=5,
                            polyorder=2)
        y_smo_5 = rp.smooth(x, y, method="whittaker", Lambda=10**0.5)
        y_smo_6 = rp.smooth(x, y, method="flat", window_length=5)
        y_smo_7 = rp.smooth(x, y, method="hanning", window_length=5)
        y_smo_8 = rp.smooth(x, y, method="hamming", window_length=5)
        y_smo_9 = rp.smooth(x, y, method="bartlett", window_length=5)
        y_smo_10 = rp.smooth(x, y, method="blackman", window_length=5)

        # Testing the shapes
        #np.testing.assert_equal(y_smo_1.shape,y.shape)
        #np.testing.assert_equal(y_smo_2.shape,y.shape)
        #np.testing.assert_equal(y_smo_3.shape,y.shape)
        np.testing.assert_equal(y_smo_4.shape, y.shape)
        np.testing.assert_equal(y_smo_5.shape, y.shape)
        np.testing.assert_equal(y_smo_6.shape, y.shape)
        np.testing.assert_equal(y_smo_7.shape, y.shape)
        np.testing.assert_equal(y_smo_8.shape, y.shape)
        np.testing.assert_equal(y_smo_9.shape, y.shape)
        np.testing.assert_equal(y_smo_10.shape, y.shape)

        # testing the y values: the relative difference should be below 10%
        #self.assertTrue(np.sum(np.abs(y-y_smo_1))/np.sum(y)<0.1)
        #self.assertTrue(np.sum(np.abs(y-y_smo_2))/np.sum(y)<0.1)
        #self.assertTrue(np.sum(np.abs(y-y_smo_3))/np.sum(y)<0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_4)) / np.sum(y) < 0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_5)) / np.sum(y) < 0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_6)) / np.sum(y) < 0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_7)) / np.sum(y) < 0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_8)) / np.sum(y) < 0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_9)) / np.sum(y) < 0.1)
        self.assertTrue(np.sum(np.abs(y - y_smo_10)) / np.sum(y) < 0.1)
Example #6
        # NOTE: this snippet is a fragment of a longer test function;
        # full_fnameg, full_fname2d and ratio are defined earlier in the
        # original file.
        # read the two data files into arrays
        filedatag = np.genfromtxt(full_fnameg, comments='#', delimiter='\t')
        filedata2d = np.genfromtxt(full_fname2d, comments='#', delimiter='\t')


        ####################################################################
        ###################### D, G, 2D peak fitting ######################

        # load data
        xg = filedatag[:, 0]
        yg_org = filedatag[:, 1]
        x2d = filedata2d[:, 0]
        y2d_org = filedata2d[:, 1] / ratio

        # smooth both spectra
        yg_s = rp.smooth(xg, yg_org, method="whittaker", Lambda=10)
        y2d_s = rp.smooth(x2d, y2d_org, method="whittaker", Lambda=10)

        # remove the background under the G peak
        bir = np.array([(min(xg), 1030), (1900, max(xg))])
        yg_cor, background = rp.baseline(xg, yg_s, bir, "arPLS", lam=10**8)
        yg_corr = yg_cor[:, 0]

        # remove the background under the 2D peak
        bir = np.array([(min(x2d), 2550), (3100, max(x2d))])
        y2d_cor, background = rp.baseline(x2d, y2d_s, bir, "arPLS", lam=10**8)
        y2d_corr = y2d_cor[:, 0]

        # assemble the spectrum to fit (2D band first, then G band)
        y = np.concatenate((y2d_corr, yg_corr))
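
The smooth-then-baseline pattern above can be exercised on synthetic data; a sketch assuming rampy's baseline returns column vectors, as the [:, 0] indexing above implies:

import numpy as np
import rampy as rp

x = np.linspace(1000.0, 2000.0, 500)
# a single peak near 1580 on top of a linear background
y = 10.0 * np.exp(-0.5 * ((x - 1580.0) / 20.0) ** 2) + 0.001 * x
y_s = rp.smooth(x, y, method="whittaker", Lambda=10)
bir = np.array([(1000, 1400), (1800, 2000)])   # background interpolation regions
y_cor, bkg = rp.baseline(x, y_s, bir, "arPLS", lam=10**6)
y_corr = y_cor[:, 0]                           # background-corrected signal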
Example #7
# NOTE: load_structures, find_function_max, generate_parameters, residual
# and gas_constant are helpers defined elsewhere in the original module.
def find_intencity_sf(structure_name,
                      tol=1e-7,
                      sigma=10,
                      bounds=(900, 4000),
                      max_iter=None,
                      all_freqs=False):
    structure = load_structures([structure_name])[structure_name]
    if not all_freqs:
        calc_maxes = find_function_max(
            structure.pm.get_spectrum_function(sigma=sigma).calculate,
            structure.freqs,
            bounds=bounds)
        calc_maxes = calc_maxes[['peak_position',
                                 'intencity']].drop_duplicates()
    else:
        calc_maxes = pd.DataFrame({
            'peak_position': structure.freqs,
            'intencity': structure.intencities
        })

    intencity_norm_constant = np.max(calc_maxes.intencity)
    params = generate_parameters(intencity_norm_constant,
                                 calc_maxes.intencity.values,
                                 calc_maxes.peak_position.values)

    spectrum = np.genfromtxt('spectra_csv/absorbtivity/' + structure_name +
                             '.CSV',
                             delimiter=';')

    algo = 'nelder'
    x_fit = spectrum[:, 0]
    y_fit = spectrum[:, 1]

    # units_constant = gas_constant * 296 / 101.3e3 * 1e10 * 1e-5
    y_smo = rp.smooth(x_fit, y_fit, method="bartlett", window_length=31)
    norm_constant = np.max(y_smo) / 10
    y_smo /= norm_constant  # normalise spectra to maximum intensity, easier to handle
    # fit the data with the Nelder-Mead algorithm
    result = lmfit.minimize(residual,
                            params,
                            method=algo,
                            args=(x_fit, y_smo),
                            tol=tol,
                            bounds=bounds,
                            options={'maxiter': max_iter})
    peaks = residual(result.params, x_fit)
    yout = peaks[0]

    psis = []
    sigmas = []
    mus = []
    units_constant = gas_constant * 296 / 101.3e3 * 1e10
    for i in range(len(calc_maxes.intencity.values)):
        # rescale the fitted amplitude by the unit and normalisation constants
        i_i = (result.params['i' + str(i)].value
               * units_constant * norm_constant * 1e-5)
        sigma_i = result.params['sigma' + str(i)].value
        mu_i = result.params['mu' + str(i)].value

        psi = i_i * sigma_i * np.sqrt(2 * np.pi)
        psis.append(psi)
        sigmas.append(sigma_i)
        mus.append(mu_i)

    calc_maxes['psi'] = psis
    calc_maxes['sigma'] = sigmas
    calc_maxes['mu'] = mus
    calc_maxes['sf'] = calc_maxes.psi / calc_maxes.intencity

    fg_types = find_function_max(
        structure.pm.get_spectrum_function(sigma=sigma).calculate,
        structure.freqs,
        bounds=bounds)
    functional_groups = [
        ' '.join(structure.get_fg_by_freq(freq)) for freq in fg_types.calc_freq
    ]
    fg_types['functional_group'] = functional_groups

    plt.figure(figsize=(20, 10))
    plt.plot(x_fit, y_smo, 'k-', label='experimental spectrum')
    plt.plot(x_fit, yout, 'r-', label='gaussian approximation')
    plt.legend()
    plt.show()

    return calc_maxes, fg_types
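
A hypothetical call, assuming a structure named 'benzene' exists in the project's catalogue and a matching spectra_csv/absorbtivity/benzene.CSV file is on disk (both names are placeholders):

# fit the experimental spectrum and inspect the per-peak scale factors
calc_maxes, fg_types = find_intencity_sf('benzene', sigma=8, bounds=(900, 4000))
print(calc_maxes[['peak_position', 'intencity', 'psi', 'sf']])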