Example #1
def massLossRate(dat):
    js = 2
    je = dat.Nx - 1
    ie = dat.Nz - 1
    ist = 2

    ro = dat.dd[:, js]*dat.Dsc

    # outer x boundary
    mout1 = zeros(dat.Nz)
    mout1[:] = 2.*pi * dat.Rsc*dat.x[je] * dat.u2[:, je]*dat.Usc * ro
    mout1[:] = [0. if x < 0. else x for x in mout1]
    mdotTot1 = trapz(mout1, dat.z*dat.Rsc)

    # upper z boundary
    mout2 = zeros(dat.Nx)
    mout2[:] = 2.*pi * dat.Rsc*dat.x[:] * dat.u1[ie, :]*dat.Usc * dat.dd[ie, :]*dat.Dsc
    mdotTotUp = trapz(mout2, dat.x*dat.Rsc)

    # lower z boundary
    mout2[:] = 0.
    mout2[:] = -2.*pi * dat.Rsc*dat.x[:] * dat.u1[ist, :]*dat.Usc * dat.dd[ist, :]*dat.Dsc
    mdotTotBot = trapz(mout2, dat.x*dat.Rsc)

    return (mdotTot1, mdotTotUp, mdotTotBot)
Example #2
  def turbulent_power_curve(self):
    '''post processes the power curve for 10 minute average points in a class 3A turbulent environment to IEC standard'''
    # Number of points in distribution
    n = 10
    # iec standard values for class A
    I_ref = 0.16
    b = 5.6

    x = np.linspace(stats.norm.ppf(0.05), stats.norm.ppf(0.95), n)  # linear spacing between 5th and 95th percentile
    x0 = x
    pdf = stats.norm.pdf(x)

    powers = []
    for v_w in self.v_ws_at_h_ref:
      # calculating sigma of 90th quantile from IEC
      sig = (I_ref * (.75 * v_w + b))

      x = x0 * (sig / x0[-1])
      x_ea = x + v_w

      # calculates all the powers at the different points
      y_ea = np.array(interp(x_ea, self.v_ws_at_h_ref, self.powers))


      av = trapz(y_ea * pdf, x_ea) / trapz(pdf, x_ea)
      powers.append(av)

    self.powers_turb = powers
Example #3
    def is_up_major(self, iatom, anglars=['s', 'p', 'd']):
        up_angs = [a + '+' for a in anglars]
        down_angs = [a + '-' for a in anglars]
        up_dos = [self.site_dos(iatom, s) for s in up_angs]

        up_dos_sum = []
        for di in up_dos:
            x = np.array([self.energy, di])
            e = x[0][x[0] < 0]
            d = x[1][x[0] < 0]
            up_dos_sum.append(trapz(d, x=e))
        self.up_tot_dos = sum(up_dos_sum)

        down_dos = [self.site_dos(iatom, s) for s in down_angs]
        down_dos_sum = []
        for di in down_dos:
            x = np.array([self.energy, di])
            e = x[0][x[0] < 0]
            d = x[1][x[0] < 0]
            down_dos_sum.append(trapz(d, x=e))
        self.down_tot_dos = sum(down_dos_sum)
        if self.up_tot_dos < self.down_tot_dos:
            print('Warning: up down reversed so that up is major ')

        return self.up_tot_dos > self.down_tot_dos
Example #4
    def get_major_minor_dos(self, iatom, anglars=['s', 'p', 'd']):
        """
        return the DOS_major, DOS_minor.
        Args:
         iatom: index of atom or symnum.
         anglars: orbital names without up/down, e.g. ['s','p'] or ['dxy','dxz'].
        """
        up_angs = [a + '+' for a in anglars]
        down_angs = [a + '-' for a in anglars]
        up_dos = [self.site_dos(iatom, s) for s in up_angs]

        up_dos_sum = []
        for di in up_dos:
            x = np.array([self.energy, di])
            e = x[0][x[0] < 0]
            d = x[1][x[0] < 0]
            up_dos_sum.append(trapz(d, x=e))
        self.up_tot_dos = sum(up_dos_sum)

        down_dos = [self.site_dos(iatom, s) for s in down_angs]
        down_dos_sum = []
        for di in down_dos:
            x = np.array([self.energy, di])
            e = x[0][x[0] < 0]
            d = x[1][x[0] < 0]
            down_dos_sum.append(trapz(d, x=e))
        self.down_tot_dos = sum(down_dos_sum)

        print(('up: %s, down: %s' % (self.up_tot_dos, self.down_tot_dos)))
        if self.up_tot_dos > self.down_tot_dos:
            return up_dos, down_dos
        else:
            return down_dos, up_dos
Example #5
def X_binding(par, qw):
    """Computation of the exciton binding energy given the electron and hole
    wave function and the variational parameter par
    Defined in Eq. 15(b) of Mares and Chuang, J. Appl. Phys. 74, 1388 (1993)

    Keyword arguments:
       par -- variational parameter in [nm]
       qw -- Object containing the electronic structure related magnitudes. See QW.
    """
    
    lam = par * 1.0E-9    
    Ry = spc.physical_constants['Rydberg constant times hc in eV'][0]
    R0 = Ry*qw.mu/qw.eps_r**2
    aB = spc.physical_constants['Bohr radius'][0]   
    ax = aB*qw.eps_r/qw.mu
    beta = ax/lam  
    C0 = R0*beta**2
    C1 = -R0*4.0*beta
    
    Ze = sp.zeros([1,qw.grid.shape[0]])
    Ze[0,:] = qw.grid[:]
    Zh = sp.zeros([qw.grid.shape[0],1])
    Zh[:,0] = qw.grid[:]
    X = 2.0*sp.absolute(Ze-Zh)/par
    Fe = sp.zeros([1,qw.grid.shape[0]])
    Fe[0,:] = qw.Elec.wf[:]**2
    Fh = sp.zeros([qw.grid.shape[0],1])
    Fh[:,0] = qw.Hole.wf[:]**2
    Int = Fe * Fh * G_int(X)
    Val = sp.trapz( sp.trapz(Int, Ze.flatten() ), Zh.flatten())
    
    return C0 + C1 * Val 
Example #6
def Data_processing_python(data_name, R_int, R_out, W, X0):
    import numpy as np
    import scipy as sp
    Max_elements = np.genfromtxt(data_name,
                                 usecols=(0),
                                 skip_header=0,
                                 skip_footer=-1,
                                 dtype=float,
                                 unpack=True)
    data = np.genfromtxt(data_name,
                         usecols=(0, 1),
                         skip_header=1,
                         skip_footer=Max_elements[0])
    np.matrix(data)
    g = np.genfromtxt(data_name,
                      usecols=(0),
                      skip_header=int(Max_elements[0] + 1.),
                      dtype=float,
                      unpack=True)
    from scipy.interpolate import LinearNDInterpolator
    f_cart = sp.interpolate.LinearNDInterpolator(data, g, rescale=False)
    r = np.linspace(R_int, R_out, 4000)
    o = np.linspace(-W * np.pi, W * np.pi, 4000)
    x = np.linspace(X0, X0, 4000)
    f = f_cart(r[:, np.newaxis] * np.cos(o[np.newaxis, :]) + x[:, np.newaxis],
               r[:, np.newaxis] * np.sin(o[np.newaxis, :])) * (r[:,
                                                                 np.newaxis])
    G_theta = sp.trapz(sp.trapz(f, o[np.newaxis, :], axis=1), r, axis=0)
    return G_theta
Example #7
File: pdf.py Project: zoidy/puq
    def __init__(self, xvals, yvals):
        # if order is reversed, flip it
        if xvals[0] > xvals[-1]:
            xvals = xvals[::-1]
            yvals = yvals[::-1]

        # number of intervals to partition our range
        nsamp = options['pdf']['numpart']

        # for pdfs with tails, set the range for sampling
        _range = options['pdf']['range']
        range = [(1.0 - _range)/2.0, (1.0 + _range)/2.0]

        self.x = xvals
        self.cdfy = np.append([0.0], np.cumsum((np.diff(yvals)/2.0 + yvals[:-1])*np.diff(xvals)))
        self.cdfy /= self.cdfy[-1]

        # Trim tails that have grown to 10% of the range of the PDF
        resample = False
        mmin, mmax = self.ppf([0, 1])
        dist = mmax - mmin
        #print "range of pdf = [%s - %s]" % (mmin, mmax)
        #print "range of PDF = [%s - %s]" % (xvals[0], xvals[-1])
        #print "dist=%s" % dist
        #print "proposed range = [%s - %s]" % (self.ppf(range[0]), self.ppf(range[1]))
        #print "[%s , %s]" % ((mmin - self.ppf(range[0]))/dist, (mmax - self.ppf(range[1]))/dist)

        if np.isnan(mmin) or abs((mmin - self.ppf(range[0])) / dist) > .1:
            mmin = self.ppf(range[0])
            resample = True
        else:
            mmin = xvals[0]

        if np.isnan(mmax) or abs((mmax - self.ppf(range[1])) / dist) > .1:
            mmax = self.ppf(range[1])
            resample = True
        else:
            mmax = xvals[-1]

        # resample if not even spacing
        if not resample:
            resample = not np.allclose(np.diff(xvals)[0], np.diff(xvals))

        # resample if number of intervals is 10% too large or small
        if not resample:
            resample = np.abs(len(xvals) - nsamp) > (nsamp * .1)

        if resample:
            self.x = np.linspace(mmin, mmax, nsamp)
            self.y = np.interp(self.x, xvals, yvals)
            self.y = np.abs(self.y / trapz(self.y, self.x))
            self.cdfy = np.append([0.0], np.cumsum((np.diff(self.y)/2.0 + self.y[:-1])*np.diff(self.x)))
        else:
            # normalize (integral must be 1.0)
            self.y = yvals / trapz(yvals, self.x)

        self.mean = trapz(self.x * self.y, self.x)
        self.dev = np.sqrt(np.abs(trapz(self.y * (self.x - self.mean)**2, self.x)))
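A minimal standalone sketch of the trapezoid bookkeeping the constructor above performs (normalize with trapz, then read off the mean and deviation); the triangular density here is synthetic and not part of puq:

import numpy as np
from numpy import trapz

x = np.linspace(0.0, 2.0, 201)              # synthetic support
y = np.where(x < 1.0, x, 2.0 - x)           # unnormalized triangle
y = y / trapz(y, x)                         # force the integral to 1.0, as in the else-branch
mean = trapz(x * y, x)
dev = np.sqrt(np.abs(trapz(y * (x - mean)**2, x)))
print(mean, dev)                            # ~1.0 and ~0.41 for this triangle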
Example #8
def diffraction_efficiency(str_gmsh_path, nm, lambda0, theta, d, eps_sub,
                           eps_sup, npt_integ, nb_slice, N_d_order):
    decalage = 0
    No_ordre = np.linspace(-N_d_order + decalage, N_d_order + decalage,
                           2 * N_d_order + 1)
    x_slice = sc.linspace(-d / 2, d / 2, npt_integ)
    k_sub = 2 * pi * sc.sqrt(eps_sub) / lambda0
    k_sup = 2 * pi * sc.sqrt(eps_sup) / lambda0
    alpha_sup = k_sup * sc.sin(theta)
    beta_sup = sc.sqrt(k_sup**2 - alpha_sup**2)
    beta_sub = sc.sqrt(k_sub**2 - alpha_sup**2)
    s_t = sc.zeros((1, (2 * N_d_order + 1)), complex)[0, :]
    s_r = sc.zeros((1, (2 * N_d_order + 1)), complex)[0, :]
    Aeff_t = sc.zeros((nb_slice, 2 * N_d_order + 1), complex)
    Aeff_r = sc.zeros((nb_slice, 2 * N_d_order + 1), complex)
    Ez_diff_t = np.loadtxt('./Views/sub_field_cuts.out', usecols=[
        8
    ]) + 1j * np.loadtxt('./Views/sub_field_cuts.out', usecols=[9])
    Ez_diff_r = np.loadtxt('./Views/sup_field_cuts.out', usecols=[
        8
    ]) + 1j * np.loadtxt('./Views/sup_field_cuts.out', usecols=[9])
    Ez_diff_t = np.transpose(Ez_diff_t.reshape(npt_integ, nb_slice, order="F"))
    Ez_diff_r = np.transpose(Ez_diff_r.reshape(npt_integ, nb_slice, order="F"))
    for m1 in range(0, nb_slice):
        slice_t = Ez_diff_t[m1, :]
        slice_r = Ez_diff_r[m1, :]
        alphat_t = alpha_sup + 2 * pi / (d) * No_ordre
        alphat_r = alpha_sup + 2 * pi / (d) * No_ordre
        betat_sup = sc.sqrt(k_sup**2 - alphat_r**2)
        betat_sub = sc.sqrt(k_sub**2 - alphat_t**2)
        for k in range(0, 2 * N_d_order + 1):
            expalpha_t = sc.exp(-1j * alphat_t[k] * x_slice)
            expalpha_r = sc.exp(-1j * alphat_r[k] * x_slice)
            s_t[k] = sc.trapz(slice_t * expalpha_t, x=x_slice) / d
            s_r[k] = sc.trapz(slice_r * expalpha_r, x=x_slice) / d
        Aeff_t[m1, :] = (np.abs(s_t))**2 * betat_sub / beta_sup
        Aeff_r[m1, :] = (np.abs(s_r))**2 * betat_sup / beta_sup
    Rordre = np.mean(Aeff_r, axis=0)
    Tordre = np.mean(Aeff_t, axis=0)
    R = np.mean(np.sum(Aeff_r.real, axis=1))
    T = np.mean(np.sum(Aeff_t.real, axis=1))
    Q_lamel_in = np.loadtxt('./Views/temp-Q_lamel_in.txt')[1]
    #[bit[1] for bit in [map(float, line.split()) for line in file('./Views/temp-Q_lamel_in.txt')]][0]
    print('\n******************')
    print('* ENERGY BALANCE *')
    print('******************')
    print('R             = ', "%0.6f" % R, '     (standard dev slice2slice=',
          sc.std(np.sum(Aeff_r.real, axis=1)), ')')
    print('T             = ', "%0.6f" % T, '     (standard dev slice2slice=',
          sc.std(np.sum(Aeff_t.real, axis=1)), ')')
    print('Q_lamel_in    = ', "%0.6f" % Q_lamel_in)
    print('------------------------')
    print('TOTAL        = ', "%0.6f" % (T + R + Q_lamel_in))
    return [Rordre, Tordre, R, T, Q_lamel_in]
Example #9
 def rmse(self, individual, *f_args):
     y = self.trajectory(individual, *f_args)
     # 2/NT * ∫y0^2 dt from 0 to N*T.
     ampl_ = np.sqrt(scipy.trapz(y[0, :]**2, x=self.x) * 2.0 / self.NT)
     # 2/NTA^2 * ∫y1^2 dt from 0 to N*T.
     omega_ = np.sqrt(
         scipy.trapz(y[1, :]**2, x=self.x) * 2.0 / (self.NT * ampl_**2))
     rmse_ampl = utils.numeric.rmse(self.ampl, ampl_)
     rmse_omega = utils.numeric.rmse(self.omega, omega_)
     # Alternative measure.
     # rmse_ampl = utils.numeric.rmse(self.ampl * np.sin(self.omega * self.x), y[0, :])
     # rmse_omega = utils.numeric.rmse(self.ampl * self.omega * np.cos(self.omega * self.x), y[1, :])
     return assessment.replace_nan((rmse_ampl, rmse_omega))
    def testConversion(self):

        #loadFiles()

        readerSIG = HarmonieReader(os.path.join(parentdir, "test.SIG"))

        print("Saving a copy in the BDF format...")
        readerSIG.saveAsEDF(os.path.join(parentdir, "test.bdf"),
                            "BDF",
                            annotationFileName=os.path.join(
                                parentdir, "test Annotations.bdf"))

        print("Reading the saved BDF file...")
        readerEDF = EDFReader(os.path.join(parentdir, "test.BDF"),
                              annotationFileName=os.path.join(
                                  parentdir, "test Annotations.bdf"))

        channelListSig = readerSIG.getChannelLabels()
        channelListEdf = readerEDF.getChannelLabels()
        # For each page, verify that the signals in reader and readerEDF
        # are essentially the same by verifying their signal-to-noise ratio
        for noPage in range(readerSIG.getNbRecords()):

            # We test only complete pages since incomplete pages will be
            # different for the two formats, the EDF format padding zeros
            # at the end of the page to make complete records.
            if readerSIG.getInfoRecords(noPage + 1).isComplete:
                pageSIG = readerSIG.readRecord(channelListSig, noPage + 1)
                pageEDF = readerEDF.readRecord(channelListEdf, noPage + 1)

                for channelSig, channelEdf in zip(channelListSig,
                                                  channelListEdf):
                    Y1 = pageSIG.getSignal(channelSig)
                    Y2 = pageEDF.getSignal(channelEdf)

                    SNR = 10.0 * log10(trapz(Y1**2) / trapz((Y1 - Y2)**2))
                    print(("noPage=", noPage + 1, "(of ",
                           readerSIG.getNbRecords(), ")", "channel=",
                           channelSig, "SNR=", SNR))

                    if channelSig != "Mic-Mic" and SNR < 50.0:
                        import pylab
                        pylab.plot(list(range(len(Y1))), Y1)
                        pylab.plot(list(range(len(Y2))), Y2)
                        pylab.show()
                    self.failIf(channelSig != "Mic-Mic" and SNR < 50.0)
            else:
                print(("Skiping incomplete page " + str(noPage + 1)))
def bandpower(signal, fs, fmin, fmax):
    '''
    This function calculates the bandpower of a selected frequency range.
    
    Parameters
    ----------
    signal : array (1-dim)
        Signal for which the bandpower is calculated.
        
    fs : float or int
        Sampling rate of the input signal [Hz].
        
    fmin : float or int
        Lower limiting frequency of the power band [Hz].
    
    fmax : float or int
        Upper limiting frequency of the power band [Hz].
    
    Returns
    -------
    float
        Bandpower of the chosen frequency range.
    '''
    f, Pxx = scipy.signal.periodogram(
        signal, fs=fs)  # Pxx has units of V**2/Hz if x is measured in V
    ind_min = scipy.argmax(f > fmin) - 1  # get lower limiting frequency index
    ind_max = scipy.argmax(f > fmax) - 1  # get upper limiting frequency index
    return scipy.trapz(Pxx[ind_min:ind_max], f[ind_min:ind_max]
                       )  # integrate selected power density spectrum & return
Example #12
def calc(x, fmin, fmax, fs=sample_freq):
    x = np.asarray(x, dtype=np.float64)
    f, Pxx = periodogram(x, fs=fs)
    ind_min = scipy.argmax(f > fmin) - 1
    ind_max = scipy.argmax(f > fmax) - 1
    y = scipy.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
    return y
Example #13
def bandpower(f, Pxx, fmin, fmax):
    """ integrate the power spectral density between fmin and fmax
        using the trapezoidal method
    """
    ind_min = scipy.argmax(f > fmin) - 1
    ind_max = scipy.argmax(f > fmax) - 1
    return scipy.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
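A hedged usage sketch for the helper above, with a synthetic 10 Hz tone; note that scipy.argmax and scipy.trapz inside bandpower assume an older SciPy that still re-exports these NumPy functions:

import numpy as np
from scipy.signal import periodogram

fs = 250.0
t = np.arange(0, 10, 1/fs)
x = np.sin(2*np.pi*10*t) + 0.1*np.random.randn(t.size)   # 10 Hz tone plus noise
f, Pxx = periodogram(x, fs=fs)
print(bandpower(f, Pxx, 8, 12))                          # most of the power sits in the 8-12 Hz band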
Example #14
def compute_baseline(path,animal,trials):
    
    BASELINE_INDEX = []
    
    for trial in trials:
        
        data = '{}/{}/{}_TRACES.csv'.format(path,trial,animal)
        
        trace = np.genfromtxt(data,delimiter='').reshape((16,3,-1))
        
        CHARGES = []
        for i in range(trace.shape[0]):
            
            sinus = trace[i,2,:]
            clean_trace = sinus[np.logical_not(np.isnan(sinus))]
            
            #Discard empty trials 
            if len(clean_trace) == 0:
                continue
            
            else:
                charge = trapz(clean_trace,dx=0.0001)
                
                CHARGES.append(charge)
                
        BASELINE_INDEX.append(np.nanmean(CHARGES))
        
    return [np.nanmean(BASELINE_INDEX), np.nanstd(BASELINE_INDEX), 
            np.nanstd(BASELINE_INDEX)/np.sqrt(len(BASELINE_INDEX))]
Example #15
def compute_timecourse(path,animal,trials):
    
    TIMECOURSE = []
    
    for trial in trials:
        
        data = '{}/{}/{}_TRACES.csv'.format(path,trial,animal)
        
        trace = np.genfromtxt(data,delimiter='').reshape((16,3,-1))
        
        CHARGES = []
        for i in range(trace.shape[0]):
            
            sinus = trace[i,2,:]
            clean_trace = sinus[np.logical_not(np.isnan(sinus))]
            
            #Discard empty trials 
            if len(clean_trace) == 0:
                continue
            
            else:
                charge = trapz(clean_trace,dx=0.0001)
                
                CHARGES.append(charge)
                
        TIMECOURSE.append(np.nanmean(CHARGES))
        
    return TIMECOURSE
Example #16
def exchange(a, b, grid, end_point=-1):
    abc, asc, al, aj = a  # radial and angular for a
    bbc, bsc, bl, bj = b  # radial and angular for b
    jmax, jmin = max(aj, bj), min(aj, bj)
    ro, w, h = grid  # radial grid and weights
    end_ind = end_point < 0 and len(ro) or (sc.where(ro > end_point)[0][0]+1)
    cabc = abc[:end_ind]
    casc = asc[:end_ind]
    cbbc = bbc[:end_ind]
    cbsc = bsc[:end_ind]
    cro = ro[:end_ind]
    cw = w[:end_ind]
    k = jmax - jmin
    if (k+al+bl) % 2 != 0:
        k += 1
    dens0 = cabc*cbbc+casc*cbsc

    dens_kg = dens0/cro**(k+1)
    dens_kl = dens0*cro**k
    res = 0e0
    while (k <= (jmax+jmin)):
        dens_g_int = cumtrapz(dens_kg*cw, initial=0e0)*h
        # outer integration from r to infinity
        dens_g_int = dens_g_int[-1] - dens_g_int
        dens_l_int = cumtrapz(dens_kl*cw, initial=0e0)*h
        res_k = sc.trapz((dens_kg*dens_l_int+dens_kl*dens_g_int)*cw)*h
        res += res_k*w3js0[(jmax, jmin, k)]
        k += 2
        dens_kg = dens_kg/cro**2
        dens_kl = dens_kl*cro**2
    return res
Example #17
def bandpower(f, Pxx, fmin, fmax):
    """ integrate the power spectral density between fmin and fmax
        using the trapezoidal method
    """
    ind_min = scipy.argmax(f > fmin) - 1
    ind_max = scipy.argmax(f > fmax) - 1
    return scipy.trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
    def compute(specter, lines):
        h = 6.62606957 * 10**-34
        c = 3 * 10**8
        kb = 1.3806488 * 10**-23

        temp = []
        z = []

        for row, spec in enumerate(np.transpose(specter)):

            tempData = []

            for key, line in enumerate(lines):
                intLine = sp.trapz(spec[line[0]:line[1]])

                #print(intLine)
                lambdaNist = line[2]
                Aki = line[3]
                Ek = line[4]
                g = line[5]
                if intLine > 0:
                    nkgk = np.log(intLine / ((h * c * Aki * g) / lambdaNist))
                    tempData.append((Ek, nkgk))

            if len(tempData) > 0:
                tempArray = np.array(tempData)
                print(tempArray)
                koef = np.polyfit(tempArray[:, 0], tempArray[:, 1], 1)
                temp.append(koef[0])
                z.append(row)

        print(temp)
        plt.plot(z, temp)
        plt.show()
Example #19
def calc(x, fmin, fmax, fs):
    x = np.asarray(x, dtype=np.float64)
    f, Pxx = periodogram(x, fs=fs)
    ind_min = argmax(f > fmin) - 1
    ind_max = argmax(f > fmax) - 1
    y = trapz(Pxx[ind_min: ind_max], f[ind_min: ind_max])
    return y
Example #20
def bandpower(seq, sampling_frequency, frequency_band, window_sec=None):
    low, high = frequency_band
    nperseg = (2 / low) * sampling_frequency
    # f, Pxx = scipy.signal.periodogram(seq, fs=sampling_frequency)
    f, Pxx = welch(seq, sampling_frequency, nperseg=nperseg)
    ind_min = scipy.argmax(f > frequency_band[0]) - 1
    ind_max = scipy.argmax(f > frequency_band[1]) - 1
    return scipy.trapz(Pxx[ind_min:ind_max], f[ind_min:ind_max])
Example #21
def sampleDerAlleles(phi, xx, ssize):
    sf = np.zeros(ssize + 1)
    for ii in xrange(0, 668):  # int( ssize*0.01) ):  # ssize+1
        # binomial distribution
        binomDist = lambda i, q: scipy.stats.binom.pmf(i, ssize, q)
        biomd = binomDist(ii, xx)
        sf[ii] = scipy.trapz(biomd * phi, xx)
    return np.nan_to_num(sf)
Example #22
def getSOC(power):
    P = power
    Voc = 400
    R = 0.1
    c = 90  # capacity in kWh
    # battery current: smaller root of the quadratic P = Voc*I - I**2*R
    I = (Voc - math.sqrt(Voc**2 - 4*R*P)) / (2*R)
    SOC = np.trapz(Voc*I)/(c*3600*1000)
    soc_percent = SOC*100
    return soc_percent
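The current above is the smaller root of the quadratic P = Voc*I - I**2*R; a quick standalone check with a hypothetical operating point:

import math

Voc, R, P = 400.0, 0.1, 50000.0                   # hypothetical values
I = (Voc - math.sqrt(Voc**2 - 4*R*P)) / (2*R)
print(abs(Voc*I - I**2*R - P) < 1e-9)             # True: the power balance is recovered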
Example #23
    def __convolveSphinx(self,star):
        
        '''
        Convolve the Sphinx output with the SPIRE resolution. The convolution
        is done in wave number (cm^-1).
        
        @param star: The Star() object for which Sphinx profiles are loaded
        @type star: Star()
        
        '''     

        #- Get sphinx model output and merge, for all star models in star_grid
        if not self.resolution: 
            print '* Resolution is undefined. Cannot convolve Sphinx.'
            return
        print '* Reading Sphinx model and merging.'
        sphinx_wav,sphinx_flux = star['LAST_GASTRONOOM_MODEL'] \
                                        and self.mergeSphinx(star) \
                                        or [[],[]]
        if not sphinx_wav: 
            print '* No Sphinx data found.'
            return
        sphinx_wav = 1./array(sphinx_wav)*10**(4)
        sphinx_flux = array(sphinx_flux)
        sphinx_wav = sphinx_wav[::-1]
        sphinx_flux = sphinx_flux[::-1]
        
        #-- eliminate some of the zeroes in the grid to reduce calculation time
        #   (can reduce the array by a factor up to 100!!)
        s = self.sigma
        lcs = array(sorted([1./line.wavelength 
                            for line in star['GAS_LINES']]))
        new_wav, new_flux = [sphinx_wav[0]],[sphinx_flux[0]]
        for w,f in zip(sphinx_wav[1:],sphinx_flux[1:]):
            if f != 0 or (w < 5*s+lcs[argmin(abs(lcs-w))] \
                                and w > lcs[argmin(abs(lcs-w))]-5*s):
                new_wav.append(w)
                new_flux.append(f)
        new_wav, new_flux = array(new_wav), array(new_flux)
        
        #-- convolve the model fluxes with a gaussian and constant sigma(spire)
        print '* Convolving Sphinx model for SPIRE.'
        convolution = Data.convolveArray(new_wav,new_flux,s)
        
        for data_wav,fn in zip(self.data_wave_list,self.data_filenames):
            rebinned = []
            #-- Convert wavelengths to wave number for integration, and reverse
            data_cm = data_wav[::-1]
            data_cm = 1./data_cm*10**4
            rebinned = [trapz(y=convolution[abs(new_wav-wavi)<=self.resolution/self.oversampling],\
                              x=new_wav[abs(new_wav-wavi)<=self.resolution/self.oversampling])\
                            /(self.resolution/self.oversampling)
                        for wavi in data_cm]
            #-- Reverse the rebinned fluxes so they match up with the 
            #   wavelength grid.
            rebinned = array(rebinned)[::-1]
            self.sphinx_convolution[star['LAST_SPIRE_MODEL']][fn] = rebinned
Example #24
    def __convolveSphinx(self, star):
        '''
        Convolve the Sphinx output with the SPIRE resolution. The convolution
        is done in wave number (cm^-1).
        
        @param star: The Star() object for which Sphinx profiles are loaded
        @type star: Star()
        
        '''

        #- Get sphinx model output and merge, for all star models in star_grid
        if not self.resolution:
            print '* Resolution is undefined. Cannot convolve Sphinx.'
            return
        print '* Reading Sphinx model and merging.'
        sphinx_wav,sphinx_flux = star['LAST_GASTRONOOM_MODEL'] \
                                        and self.mergeSphinx(star) \
                                        or [[],[]]
        if not sphinx_wav:
            print '* No Sphinx data found.'
            return
        sphinx_wav = 1. / array(sphinx_wav) * 10**(4)
        sphinx_flux = array(sphinx_flux)
        sphinx_wav = sphinx_wav[::-1]
        sphinx_flux = sphinx_flux[::-1]

        #-- eliminate some of the zeroes in the grid to reduce calculation time
        #   (can reduce the array by a factor up to 100!!)
        s = self.sigma
        lcs = array(
            sorted([1. / line.wavelength for line in star['GAS_LINES']]))
        new_wav, new_flux = [sphinx_wav[0]], [sphinx_flux[0]]
        for w, f in zip(sphinx_wav[1:], sphinx_flux[1:]):
            if f != 0 or (w < 5*s+lcs[argmin(abs(lcs-w))] \
                                and w > lcs[argmin(abs(lcs-w))]-5*s):
                new_wav.append(w)
                new_flux.append(f)
        new_wav, new_flux = array(new_wav), array(new_flux)

        #-- convolve the model fluxes with a gaussian and constant sigma(spire)
        print '* Convolving Sphinx model for SPIRE.'
        convolution = Data.convolveArray(new_wav, new_flux, s)

        for data_wav, fn in zip(self.data_wave_list, self.data_filenames):
            rebinned = []
            #-- Convert wavelengths to wave number for integration, and reverse
            data_cm = data_wav[::-1]
            data_cm = 1. / data_cm * 10**4
            rebinned = [trapz(y=convolution[abs(new_wav-wavi)<=self.resolution/self.oversampling],\
                              x=new_wav[abs(new_wav-wavi)<=self.resolution/self.oversampling])\
                            /(self.resolution/self.oversampling)
                        for wavi in data_cm]
            #-- Reverse the rebinned fluxes so they match up with the
            #   wavelength grid.
            rebinned = array(rebinned)[::-1]
            self.sphinx_convolution[star['LAST_SPIRE_MODEL']][fn] = rebinned
Example #25
def _bandwidth(data, N=None, MIN=None, MAX=None):
    '''
    An implementation of the kde bandwidth selection method outlined in:

    Z. I. Botev, J. F. Grotowski, and D. P. Kroese. Kernel density
    estimation via diffusion. The Annals of Statistics, 38(5):2916-2957, 2010.

    Based on the implementation in Matlab by Zdravko Botev.

    Daniel B. Smith, PhD
    https://github.com/Daniel-B-Smith/KDE-for-SciPy/blob/master/kde.py
    Updated 1-23-2013
    '''
    # Parameters to set up the mesh on which to calculate
    N = 2**14 if N is None else int(2**sp.ceil(sp.log2(N)))
    if MIN is None or MAX is None:
        minimum = min(data)
        maximum = max(data)
        Range = maximum - minimum
        MIN = minimum - Range / 10 if MIN is None else MIN
        MAX = maximum + Range / 10 if MAX is None else MAX

    # Range of the data
    R = MAX - MIN

    # Histogram the data to get a crude first approximation of the density
    M = len(data)
    DataHist, bins = sp.histogram(data, bins=N, range=(MIN, MAX))
    DataHist = DataHist / M
    DCTData = scipy.fftpack.dct(DataHist, norm=None)

    I = [iN * iN for iN in range(1, N)]
    SqDCTData = (DCTData[1:] / 2)**2

    # The fixed point calculation finds the bandwidth = t_star
    guess = 0.1
    try:
        t_star = scipy.optimize.brentq(__fixed_point,
                                       0,
                                       guess,
                                       args=(M, I, SqDCTData))
    except ValueError:
        print('Oops!')
        return None

    # Smooth the DCTransformed data using t_star
    SmDCTData = DCTData * sp.exp(-sp.arange(N)**2 * sp.pi**2 * t_star / 2)
    # Inverse DCT to get density
    density = scipy.fftpack.idct(SmDCTData, norm=None) * N / R
    mesh = [(bins[i] + bins[i + 1]) / 2 for i in range(N)]
    bandwidth = sp.sqrt(t_star) * R

    density = density / sp.trapz(density, mesh)

    # return bandwidth, mesh, density
    return bandwidth
Example #26
 def Lookback_Time(self, zt):
     lenzt = len(zt)
     th = 1 / self.Ho
     E_zt = np.zeros(lenzt)
     tL = np.zeros(lenzt)
     t = np.zeros(lenzt)
     for n in range(lenzt):
         E_zt[n] = np.sqrt(omega_M * np.power((1.0 + zt[n]), 3) + omega_K * np.power((1.0 + zt[n]), 2) + omega_L)
         integrating_quantity = (1.0 + zt[0:n]) * E_zt[0:n]
         tL[n] = th * sp.trapz((1.0 / integrating_quantity), zt[0:n])
     return tL / th
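The loop above evaluates the lookback-time integral tL = th * ∫ dz / ((1 + z) E(z)); a short standalone version with np.trapz, assuming a flat LCDM with omega_M = 0.3, omega_L = 0.7:

import numpy as np

omega_M, omega_K, omega_L = 0.3, 0.0, 0.7
z = np.linspace(0.0, 2.0, 2001)
E = np.sqrt(omega_M*(1 + z)**3 + omega_K*(1 + z)**2 + omega_L)
tL_over_th = np.trapz(1.0/((1 + z)*E), z)          # lookback time in units of 1/Ho
print(tL_over_th)                                  # ~0.73 out to z = 2 in this cosmology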
Example #27
    def get_fe_v1(df):
        """
        Compute the total loaded mileage
        """
        measdate = []
        s_total = 0
        if not df.empty:
            measdate = df.index[0]  # timestamp
            s_total = scipy.trapz(np.abs(df.speed)) * df.dt  # total mileage

        return measdate, s_total
Example #28
    def get_fe_v2(df):
        """
        Compute loaded and unloaded mileage metrics from the force-control (pressure) mode
        """
        measdate = []
        s_total = 0
        s_in_operation = 0
        if not df.empty:
            measdate = df.index[0]  # timestamp
            idx = (df.in_force_control == 1)
            re_iter = com_util.Reg.finditer(idx, 0.5 *
                                            df.num_per_sec)  # count loading intervals longer than 0.5 s
            s_total = scipy.trapz(np.abs(df.speed)) * df.dt  # total mileage
            for i in re_iter:
                [stidx, edidx] = i.span()
                s_in_operation += scipy.trapz(np.abs(
                    df.speed[stidx:edidx])) * df.dt  # loaded mileage

        return measdate, s_total, s_in_operation, max(0,
                                                      s_total - s_in_operation)
Example #29
def bandpower(x, fs, fmin, fmax):
    if any(np.isnan(x)):
        try:
            x = interp_nans(x)
        except:
            print("except!")
            return 0

    f, Pxx = periodogram(x, fs=fs, nfft=len(x) * 10)
    ind_min = argmax(f > fmin) - 1
    ind_max = argmax(f > fmax) - 1
    return trapz(Pxx[ind_min:ind_max], f[ind_min:ind_max])
Example #30
 def Lookback_Time(self, zt):
     lenzt = len(zt)
     th = 1 / self.Ho
     E_zt = np.zeros(lenzt)
     tL = np.zeros(lenzt)
     t = np.zeros(lenzt)
     for n in range(lenzt):
         E_zt[n] = np.sqrt(omega_M * np.power((1. + zt[n]), 3) +
                           omega_K * np.power((1. + zt[n]), 2) + omega_L)
         integrating_quantity = (1. + zt[0:n]) * E_zt[0:n]
         tL[n] = th * sp.trapz((1. / integrating_quantity), zt[0:n])
     return tL / th
Example #31
def bandpower(x, fs, fmin, fmax):
    '''
    Taken (and adapted -- to not miss indexes) from the internet: 
    https://stackoverflow.com/questions/44547669/python-numpy-equivalent-of-bandpower-from-matlab
    Based on the matlab function (bandpower)

    INPUT:
    x - time series
    fs - sample rate to return the power in a specified frequency band
    fmin - lower band of the frequency range
    fmax - upper band of the frequency range

    Return the average power in the frequency range
    '''
    f, Pxx = signal.periodogram(x, fs=fs)
    ind_min = sp.argmax(f > fmin) - 1
    ind_max = sp.argmax(f > fmax) - 1
    if ind_max != 0:
        var = sp.trapz(Pxx[ind_min:ind_max + 1], f[ind_min:ind_max + 1])
    elif ind_max == 0:
        var = sp.trapz(Pxx[ind_min:], f[ind_min:])
    return var
Example #32
    def calculate_abspower(self, data, times, s_rate):
        channel_num = len(data[0])
        baseline_band_mat = []

        for c in range(channel_num):
            data = np.array(data)

            # periodogram for computing the power spectral density
            f, power = periodogram(
                data[:, c], s_rate
            )  # f holds the frequency bins, power the spectral power for channel c
            # f = frequencies (0-62.5)

            # indices for alpha and theta freq
            fmin_theta = 4
            fmax_theta = 8
            ind_min_theta = argmax(f > fmin_theta) - 1
            ind_max_theta = argmax(f > fmax_theta) - 1

            fmin_alpha = 8
            fmax_alpha = 13
            ind_min_alpha = argmax(f > fmin_alpha) - 1
            ind_max_alpha = argmax(f > fmax_alpha) - 1

            # calculate bandpower via integration
            bandpower_alpha = trapz(power[ind_min_alpha:ind_max_alpha],
                                    f[ind_min_alpha:ind_max_alpha])
            bandpower_theta = trapz(power[ind_min_theta:ind_max_theta],
                                    f[ind_min_theta:ind_max_theta])

            abs_band = np.zeros(self.n_bands)
            abs_band[0] = bandpower_theta
            abs_band[1] = bandpower_alpha

            # append for each channel
            baseline_band_mat.append(abs_band)

        return baseline_band_mat
Example #33
    def transform(F):
        diff=Abel.diff(F)
        nx = len(F)
        x=np.arange(nx)

        integral = sp.zeros(nx, dtype=float)

        for i in range(0, nx-1):
            divisor = sp.sqrt(x[i:nx]**2 - x[i]**2)
            integrand = diff[i:nx] / divisor
            integrand[0] = integrand[1] # deal with the singularity at x=r
            integral[i] = - sp.trapz(integrand, x[i:nx]) / sp.pi

        return(integral)
Example #34
    def transform(F):
        diff = Abel.diff(F)
        nx = len(F)
        x = np.arange(nx)

        integral = sp.zeros(nx, dtype=float)

        for i in range(0, nx - 1):
            divisor = sp.sqrt(x[i:nx]**2 - x[i]**2)
            integrand = diff[i:nx] / divisor
            integrand[0] = integrand[1]  # deal with the singularity at x=r
            integral[i] = -sp.trapz(integrand, x[i:nx]) / sp.pi

        return (integral)
Example #35
def bandpower_Pxx(Pxx, f, fmin, fmax):
    ''' 
    Adapted from (see below) to read directly the power spectrum

    Taken (and adapted -- to not miss indexes) from the internet: 
    https://stackoverflow.com/questions/44547669/python-numpy-equivalent-of-bandpower-from-matlab
    Based on the matlab function (bandpower)

    INPUT:
    Pxx - power spectrum
    f - frequencies from the power spectrum
    fmin - lower band of the frequency range
    fmax - upper band of the frequency range

    Return the average power in the frequency range by integrating 
        the power spectral density (PSD) estimate, Pxx (using the trapezoidal rule)
    '''
    ind_min = sp.argmax(f > fmin) - 1
    ind_max = sp.argmax(f > fmax) - 1
    if ind_max != -1:
        var = sp.trapz(Pxx[ind_min:ind_max + 1], f[ind_min:ind_max + 1])
    elif ind_max == -1:
        var = sp.trapz(Pxx[ind_min:], f[ind_min:])
    return var
Example #36
def integrate_evoked(evoked, axis=-1):
    """Integrate a peristumulus timecourse.

    Parameters
    ----------
    evoked : list of 2D arrays or 2D array
        values of evoked datapoints

    Returns
    -------
    int_evoked : squeezed array
        evoked values integrated over the time dimension

    """
    return sp.trapz(evoked, axis=axis)
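A tiny usage sketch for the helper above, assuming unit spacing on the time axis; sp.trapz, like the function itself, relies on an older SciPy that still re-exports trapz:

import numpy as np
import scipy as sp

evoked = np.vstack([np.sin(np.linspace(0, np.pi, 50)),    # two synthetic timecourses
                    np.cos(np.linspace(0, np.pi, 50))])
print(integrate_evoked(evoked))                           # one trapezoidal area per row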
Example #37
def integrate_evoked(evoked, axis=-1):
    """Integrate a peristumulus timecourse.

    Parameters
    ----------
    evoked : list of 2D arrays or 2D array
        values of evoked datapoints

    Returns
    -------
    int_evoked : squeezed array
        evoked values integrated over the time dimension

    """
    return sp.trapz(evoked, axis=axis)
Example #38
def coulumb(a, b, grid, end_point=-1):
    abc, asc, al, aj = a  # radial and angular for a
    bbc, bsc, bl, bj = b  # radial and angular for b
    ro, w, h = grid  # radial grid and weights
    end_ind = end_point < 0 and len(ro) or (sc.where(ro > end_point)[0][0]+1)
    cro = ro[:end_ind]
    cw = w[:end_ind]
    cabc = abc[:end_ind]
    casc = asc[:end_ind]
    cbbc = bbc[:end_ind]
    cbsc = bsc[:end_ind]
    bdens = cbbc**2 + cbsc**2
    inner = 1./cro*cumtrapz(bdens*cw, initial=0e0)*h
    outer = cumtrapz(bdens/cro*cw, initial=0e0)*h
    outer = outer[-1] - outer  # outer integration from r to infinity
    adens = cabc**2 + casc**2
    res = sc.trapz(adens*(inner+outer)*cw)*h
    return res
Example #39
 def Comoving_Distance(self, z):
     # print self.Ho, "is Ho. \n"
     Dh = self.Dh
     E_z = np.zeros(len(z))
     Dc = np.zeros(len(z))
     for n in range(len(z)):
         E_z[n] = np.sqrt(omega_M * np.power((1.0 + z[n]), 3.0) + omega_K * np.power((1.0 + z[n]), 2.0) + omega_L)
         Dc[n] = Dh * sp.trapz((1.0 / E_z[0:n]), z[0:n])
         # Dc[n]=Dh*sp.integrate.simps( (1.E_z[0:n]) , z[0:n] ,dx=666,axis=-1,even='avg')
     if omega_K > 0:
         Dm = Dh * (1.0 / np.sqrt(omega_K)) * np.sinh(np.sqrt(omega_K) * (Dc / Dh))
         print "Omega k > 0 \n"
     if omega_K == 0:
         Dm = Dc
         # print "Omega k = 0 \n"
     if omega_K < 0:
         Dm = Dh * (1.0 / np.sqrt(abs(omega_K))) * np.sin(np.sqrt(abs(omega_K)) * (Dc / Dh))
         print "Omega k < 0 \n"
     return Dm
Example #40
def kde(data, N=None, MIN=None, MAX=None):

    # Parameters to set up the mesh on which to calculate
    N = 2**12 if N is None else int(2**sci.ceil(sci.log2(N)))
    if MIN is None or MAX is None:
        minimum = min(data)
        maximum = max(data)
        Range = maximum - minimum
        MIN = minimum - Range/10 if MIN is None else MIN
        MAX = maximum + Range/10 if MAX is None else MAX

    # Range of the data
    R = MAX-MIN

    # Histogram the data to get a crude first approximation of the density
    M = len(data)
    DataHist, bins = sci.histogram(data, bins=N, range=(MIN,MAX))
    DataHist = DataHist/M
    DCTData = scipy.fftpack.dct(DataHist, norm=None)

    I = [iN*iN for iN in range(1, N)]
    SqDCTData = (DCTData[1:]/2)**2

    # The fixed point calculation finds the bandwidth = t_star
    guess = 0.1
    try:
        t_star = scipy.optimize.brentq(fixed_point, 0, guess, 
                                       args=(M, I, SqDCTData))
    except ValueError:
        print('Oops!')
        return None

    # Smooth the DCTransformed data using t_star
    SmDCTData = DCTData*sci.exp(-sci.arange(N)**2*sci.pi**2*t_star/2)
    # Inverse DCT to get density
    density = scipy.fftpack.idct(SmDCTData, norm=None)*N/R
    mesh = [(bins[i]+bins[i+1])/2 for i in range(N)]
    bandwidth = sci.sqrt(t_star)*R
    
    density = density/sci.trapz(density, mesh)
    cdf = np.cumsum(density)*(mesh[1]-mesh[0])
    
    return bandwidth, mesh, density, cdf
Example #41
def exchange_melem(a, b, c, grid, end_point=-1):
    abc, asc, al, aj = a  # radial and angular for a
    bbc, bsc, bl, bj = b  # radial and angular for b
    cbc, csc, cl, cj = c  # radial and angular for c
    if (cj != bj or cl != bl):
        return 0e0  # only angle-diagonal elements are nonzero

    jmax, jmin = max(aj, bj), min(aj, bj)
    ro, w, h = grid  # radial grid and weights
    end_ind = end_point < 0 and len(ro) or (sc.where(ro > end_point)[0][0]+1)
    cabc = abc[:end_ind]
    casc = asc[:end_ind]
    cbbc = bbc[:end_ind]
    cbsc = bsc[:end_ind]
    ccbc = cbc[:end_ind]
    ccsc = csc[:end_ind]
    cro = ro[:end_ind]
    cw = w[:end_ind]
    k = jmax - jmin
    if (k+al+bl) % 2 != 0:
        k += 1
    dens0b = cabc*cbbc+casc*cbsc
    dens0c = cabc*ccbc+casc*ccsc
    dens_kgb = dens0b/cro**(k+1)
    dens_klb = dens0b*cro**k
    dens_kgc = dens0c/cro**(k+1)
    dens_klc = dens0c*cro**k
    res = 0e0
    while (k <= (jmax+jmin)):
        dens_g_int = cumtrapz(dens_kgc*cw, initial=0e0)*h
        # outer integration from r to infinity
        dens_g_int = dens_g_int[-1] - dens_g_int
        dens_l_int = cumtrapz(dens_klc*cw, initial=0e0)*h
        res_k = sc.trapz((dens_kgb*dens_l_int+dens_klb*dens_g_int)*cw)*h
        res += res_k*w3js0[(jmax, jmin, k)]
        k += 2
        dens_kgb = dens_kgb/cro**2
        dens_klb = dens_klb*cro**2
        dens_kgc = dens_kgc/cro**2
        dens_klc = dens_klc*cro**2
    return res
Example #42
def coulumb_melem(a, b, c, grid, end_point=-1):
    abc, asc, al, aj = a  # radial and angular for a
    bbc, bsc, bl, bj = b  # radial and angular for b
    cbc, csc, cl, cj = c  # radial and angular for c
    if (cl != bl or cj != bj):
        return 0e0        # operator diagonal for angular variables
    ro, w, h = grid  # radial grid and weights
    end_ind = end_point < 0 and len(ro) or (sc.where(ro > end_point)[0][0]+1)
    cro = ro[:end_ind]
    cw = w[:end_ind]
    cabc = abc[:end_ind]
    casc = asc[:end_ind]
    cbbc = bbc[:end_ind]
    cbsc = bsc[:end_ind]
    ccbc = cbc[:end_ind]
    ccsc = csc[:end_ind]
    bdens = cbbc*ccbc + ccsc*cbsc
    inner = 1./cro*cumtrapz(bdens*cw, initial=0e0)*h
    outer = cumtrapz(bdens/cro*cw, initial=0e0)*h
    outer = outer[-1] - outer  # outer integration from r to infinity
    adens = cabc**2 + casc**2
    res = sc.trapz(adens*(inner+outer)*cw)*h
    return res
    def compute(specter,lines):
        h=6.62606957*10**-34
        c=3*10**8
        kb=1.3806488*10**-23

        temp=[]
        z=[]

        for row, spec in enumerate(np.transpose(specter)):

            tempData=[]

            for key, line in enumerate(lines):
                intLine = sp.trapz(spec[line[0]:line[1]])

                #print(intLine)
                lambdaNist=line[2]
                Aki=line[3]
                Ek=line[4]
                g=line[5]
                if intLine>0:
                    nkgk=np.log(intLine/((h*c*Aki*g)/lambdaNist))
                    tempData.append((Ek,nkgk))

            if len(tempData)>0:
                tempArray=np.array(tempData)
                print(tempArray)
                koef=np.polyfit(tempArray[:,0], tempArray[:,1], 1)
                temp.append(koef[0])
                z.append(row)



        print(temp)
        plt.plot(z,temp)
        plt.show()
Example #44
def energy(x, u):
    return scipy.trapz(abs(u)**2, x)
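Further down, this helper is used to normalize eigenvectors (eigenvector /= scipy.sqrt(util.energy(x, eigenvector))); a minimal standalone sketch of that normalization with a synthetic mode, again assuming an older SciPy that still exposes scipy.trapz:

import numpy as np
import scipy

x = np.linspace(0.0, 1.0, 501)
u = np.sin(np.pi * x)                                  # synthetic mode shape
u = u / np.sqrt(scipy.trapz(np.abs(u)**2, x))          # unit "energy" as defined above
print(scipy.trapz(np.abs(u)**2, x))                    # ~1.0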
Example #45
    def __init__(self, xvals, yvals):
        # if order is reversed, flip it
        if xvals[0] > xvals[-1]:
            xvals = xvals[::-1]
            yvals = yvals[::-1]

        # number of intervals to partition our range
        nsamp = options['pdf']['numpart']

        # for pdfs with tails, set the range for sampling
        _range = options['pdf']['range']
        range = [(1.0 - _range)/2.0, (1.0 + _range)/2.0]

        self.x = xvals

        if len(xvals) == 1 or xvals[0] == xvals[-1]:
            self.x = [xvals[0]]
            self.y = [1]
            self.cdfy = [1]
            self.mean = xvals[0]
            self.dev = 0
            return

        self.cdfy = np.append([0.0], np.cumsum((np.diff(yvals)/2.0 + yvals[:-1])*np.diff(xvals)))
        self.cdfy /= self.cdfy[-1]

        # Trim tails that have grown to 10% of the range of the PDF
        resample = False
        mmin, mmax = self.ppf([0, 1])
        dist = mmax - mmin

        if dist == 0.0:
            self.x = [xvals[0]]
            self.y = [1]
            self.cdfy = [1]
            self.mean = xvals[0]
            self.dev = 0
            return

        # print "range of pdf = [%s - %s]" % (mmin, mmax)
        # print "range of PDF = [%s - %s]" % (xvals[0], xvals[-1])
        # print "dist=%s" % (dist)
        # print "proposed range = [%s - %s]" % (self.ppf(range[0]), self.ppf(range[1]))

        if np.isnan(mmin) or abs((mmin - self.ppf(range[0])) / dist) > .1:
            mmin = self.ppf(range[0])
            resample = True
        else:
            mmin = xvals[0]

        if np.isnan(mmax) or abs((mmax - self.ppf(range[1])) / dist) > .1:
            mmax = self.ppf(range[1])
            resample = True
        else:
            mmax = xvals[-1]

        # resample if not even spacing
        if not resample:
            resample = not np.allclose(np.diff(xvals)[0], np.diff(xvals))

        # resample if number of intervals is 10% too large or small
        if not resample:
            resample = np.abs(len(xvals) - nsamp) > (nsamp * .1)

        if resample:
            self.x = np.linspace(mmin, mmax, nsamp)
            self.y = np.interp(self.x, xvals, yvals)
            self.y = np.abs(self.y / trapz(self.y, self.x))
            self.cdfy = np.append([0.0], np.cumsum((np.diff(self.y)/2.0 + self.y[:-1])*np.diff(self.x)))
        else:
            # normalize (integral must be 1.0)
            self.y = yvals / trapz(yvals, self.x)

        self.mean = trapz(self.x * self.y, self.x)
        self.dev = np.sqrt(np.abs(trapz(self.y * (self.x - self.mean)**2, self.x)))
for l in range(1,lmax):
    k_star = 2/sp.exp(1)*(1-sp.log(2)/(2*l))*l/x
    indices_term1 = (k_star < ks) & (ks < 1./R)
    indices_term2 = ks >= 1./R
#    if k_star < 1./R:
#        indices_term1 = (k_star < ks) & (ks < 1./R)
#        factor_term1 = 1
#    else:
#        indices_term1 = (1/R < ks) & (ks < k_star)
#        factor_term1 = -1
        
    integrand_term1 = 1./(ks[indices_term1]*x)**2*Pmk[indices_term1]
    integrand_term2 = 1./((ks[indices_term2]*x)**2*(ks[indices_term2]*R))*Pmk[indices_term2] 

    integral_term1 = sp.trapz(integrand_term1,ks[indices_term1])
    integral_term2 = sp.trapz(integrand_term2,ks[indices_term2])    
    C[l] = H**2*f**2/sp.pi*(integral_term1 + integral_term2)
#    
#for l in range(1,lmax):
#    C[l] = 4*sp.pi/(2*l+1)*(l*B[l-1]+(l+1)*B[l+1])
#
# 
##plt.plot(ks,Pmk)
##plt.xscale('log')
##plt.yscale('log')
##plt.xlabel('$k [Mpc^{-1}]$')
##plt.ylabel('P(k) $[Mpc^{3}]$')
##plt.axis([0.001, 10, 1e-4, 1e5])
#
ls = sp.array(range(1,lmax+1))
Example #47
                     

#     y=mdotOverTime
    
#     ax.plot(timeArr,  y)
#     ax.plot(timeArr,  log10(mdotOverTimeR))
#     ax.plot(timeArr,  (mdotOverTimeUD))    
#     set_fonts_etc(ax)

    ax.plot(timeArr,  log10(abs(mdotAccrAvr)))
    
    

    show()
    
    exit()
    
    


if (plotMassLoss):     
    
    mLost = trapz(mdotOverTime, timeArr* 4.8*10**3)
    print("mLost=", mLost)
    
    ax.plot(timeArr,  (mdotOverTime))
    
#ax.plot(simTime, mv1)

plt.show()
Example #48
 def integrate(func_vals):
     return sc.trapz(func_vals, self.grid)
for n in range(args.num + 1):
    eigenvalue = eigenvalues[n]
    eigenvector = eigenvectors[:, n]
    eigenvector /= scipy.sqrt(util.energy(x, eigenvector))
    eigenvectors[:, n] = eigenvector


mode_numbers = scipy.arange(args.num)

coefficients = scipy.zeros((len(t), len(mode_numbers)))
for n in mode_numbers:
    eigenvector = eigenvectors[:, n]
    for idx, _ in enumerate(t):
        state = states[idx, :]
        coefficient = scipy.trapz(eigenvector * state, x)
        coefficients[idx, n] = abs(coefficient)
# coefficients[coefficients == 0] = None
# coefficients = scipy.log10(coefficients)


estimate = scipy.zeros(len(t))
for n in mode_numbers:
    estimate += coefficients[:, n]**2

energy = scipy.zeros(len(t))
for idx, _ in enumerate(t):
    energy[idx] = util.energy(x, states[idx, :])


# Sort the modes by maximum value at zero.
Example #50
def kinEnergyLoss(dat):    
    
    i0  = dat.Nz - 3
    dEkn =  pi* dat.x[:]*dat.Rsc * dat.dd[:,js]*dat.Dsc  * (dat.u1[i0, :]*dat.Usc)**3
    ekinTot =trapz(  2.*dEkn, dat.x*dat.Rsc) 
    return(ekinTot)
import sys

from source_hfd_python.hfd_dat import Hfd
from scipy import trapz, exp, where


def str2nlj(s):
    n = int(s[0])
    l = 'spdfgh'.index(s[1])
    j = float(s[2:s.find('/')])/2e0
    return (n, l, j)

hfd = Hfd()
grid = (hfd.grid, hfd.weights, hfd.h)
nc = where(hfd.grid > 0.5)[0][0]
print(nc)
testgrid = exp(-hfd.grid)
res = trapz(testgrid[:nc]*hfd.weights[:nc])*hfd.h
print("test grid")
print("{} - {} = {}\n".format(res, (1-exp(-0.5)), res-1.0+exp(-0.5)))
rc = 5e-1
csn = sys.argv[1]  # core shell
vsns = sys.argv[2:]  # valent shells
ncc, lc, jc = str2nlj(csn)
p, q, en = hfd[csn]
print("norm function")
core_orb = (p, q, lc, jc)
norm2rc = trapz((p[:nc]**2+q[:nc]**2)*hfd.weights[:nc])*hfd.h
norm2 = trapz((p**2+q**2)*hfd.weights[:hfd.imax])*hfd.h
print("||{}||^2  = {}\t{}".format(csn, norm2rc, norm2))
v_orbs = []
for v in vsns:
    nv, lv, jv = str2nlj(v)
Example #52

def ctrapz(y, x):
    x_shape = x.shape[0]
    y_shape = 1
    if y.ndim > 1:
        y_shape = y.shape[0]

    res = np.zeros(y_shape)

    C_code = '''
            #line 29 "ctrapz.py"
            double result=0;
            for(int j = 0; j < y_shape; j++){
                for(int i = 0; i < x_shape-1; i++){
                    result += (y(i) + y(i+1)) / 2.0 * (x(i+1) - x(i));  
                    };
                 res(j) = result;
                 result=0;
            };
            '''

    weave.inline(C_code, ['x', 'y', 'x_shape', 'y_shape', 'res'], type_converters=weave.converters.blitz, compiler='gcc')
    return res

if __name__ == '__main__':
    x = np.array([1, 2, 3, 4])
    y = np.array([3, 2, 3, 4])
    print ctrapz(y, x)
    print trapz(y, x)