Example No. 1
	def integrate( self, age ):

		# calculate range to integrate over
		lower_bound = age - self.maxage
		upper_bound = age - self.minage

		# if upper_bound is negative then this is a later region and we are working on early ages
		# If so, return zeros
		if upper_bound < 0: return ( np.zeros( self.ls.size ), 0 )

		# find things in the age range (include a little extra to account for rounding errors)
		inds = np.where( (self.ages >= lower_bound-age*1e-5) & (self.ages <= upper_bound+age*1e-5) )[0]

		# simpsons rule is based on intervals, so include the SED one age lower if it exists
		# otherwise one interval will be missed at every boundary
		if inds[0] > 0 and np.abs( self.ages[inds[0]] - lower_bound ) > 1e-5*age:
			inds = np.append( inds[0]-1, inds )

		weights = self.sfh_func( age-self.ages[inds] )

		# if weights are all zero then there is no star formation in this region and therefore no need to integrate
		if max( weights ) <= 0:
			return ( np.zeros( self.ls.size ), 0 )

		if self.has_dust:
			# integrate weights*sed*dust
			seds = integrate.simps( weights*self.seds[:,inds]*dust_wrapper( self.ages[inds], self.ls ), x=self.ages[inds], even='avg' )
		else:
			# integrate weights*sed
			seds = integrate.simps( weights*self.seds[:,inds], x=self.ages[inds], even='avg' )

		# integrate weights*mass
		mass = integrate.simps( weights*self.masses[inds], x=self.ages[inds], even='avg' ) if self.has_masses else 0

		return ( seds, mass )
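
All of the examples on this page call scipy.integrate.simps; in recent SciPy releases that alias (and the even= keyword used above) has been deprecated and then removed in favour of scipy.integrate.simpson, so a small compatibility shim like the sketch below (an assumption about your installed SciPy, not part of the original code) may be needed to run the snippets unchanged.

# Compatibility sketch: prefer the modern name, fall back to the legacy alias.
try:
    from scipy.integrate import simpson as simps   # newer SciPy
except ImportError:
    from scipy.integrate import simps              # older SciPy

import numpy as np
x = np.linspace(0.0, 1.0, 101)
print(simps(x**2, x))   # ~1/3
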
def eigenvalue(z, V, psi, m):
    """Calcula um autovalor como E=<Psi|H|Psi>/<Psi|Psi>
    onde H = T + V, T eh o operador de energia cinetica em uma dimensao

    Params
    ------

    z : array_like
        o eixo z
    V : array_like
        o potencial
    psi : array_like
        a funcao de onda psi(z)
    m : array_like
        a massa efetiva m*(z)

    Returns
    -------

    O autovalor E=<Psi|H|Psi>/<Psi|Psi>
    """
    dz = np.append(z[1:]-z[:-1], z[1]-z[0])
    dz2 = dz**2
    N = z.size  # number of grid points
    h_psi = np.zeros(N, dtype=complex)

    for i in range(N):
        h_psi[i] = ((0.5/dz2[i])*(1.0/idf(m, i+0.5) +
                                  1.0/idf(m, i-0.5))+V[i])*psi[i]
        if i > 0:
            h_psi[i] += -(0.5/dz2[i])*(psi[i-1]/idf(m, i-0.5))
        if i < N-1:
            h_psi[i] += -(0.5/dz2[i])*(psi[i+1]/idf(m, i+0.5))

    psi_h_psi = simps(psi.conj()*h_psi, z)
    return (psi_h_psi / simps(psi.conj()*psi, z)).real
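
A quick sanity check of the E = <Psi|H|Psi>/<Psi|Psi> ratio evaluated with simps: the self-contained sketch below uses a uniform mass and a plain numerical second derivative instead of the idf helper above, and recovers E of about 0.5 for the harmonic-oscillator ground state (hbar = m = omega = 1).

import numpy as np
from scipy.integrate import simps

z = np.linspace(-8.0, 8.0, 2001)
V = 0.5*z**2                     # harmonic potential
psi = np.exp(-0.5*z**2)          # (unnormalized) ground-state wave function

# H psi = -1/2 d^2 psi/dz^2 + V psi, with the derivative taken numerically
h_psi = -0.5*np.gradient(np.gradient(psi, z), z) + V*psi

E = simps(psi*h_psi, z) / simps(psi*psi, z)
print(E)   # ~0.5
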
 def weight_CLAP_Data(self, lbda_cent):
     """
     Compute the weighting function for the Clap data
     to assign a weight due to the particular shape for
     the interested fiber arm (which is always the same
     for CLAP 1). With this is possible to convert the
     CLAP light level from ADUs to W.
     
     INPUT:
             lbda_cent: the central wavelength as observed by SNIFS
                        in Angstrom
     OUTPUT:
             weight_funct: the weighting function to  apply to CLAP light
                           level to convert the ADU in W 
     """
     lbd_weight = N.linspace(0.,99.940, 1001)
     lbd_line = N.linspace(lbd_weight[0]-lbd_weight[len(lbd_weight)//2],lbd_weight[-1]-lbd_weight[len(lbd_weight)//2],len(lbd_weight))+lbda_cent
     clap1_int = N.mean([d(lbd_line) for d in self.clap1_simul], axis=0)
     err_clap1_int = N.var([d(lbd_line) for d in self.clap1_simul], axis=0)
     integ_weight = integrate.simps(self.Inter1*clap1_int, lbd_line)
     sqr_Inter1 = self.Inter1**4
     sqr_clap1 = clap1_int**2
     Int_func = integrate.simps(self.Inter1, lbd_line)
     Int_sqr = integ_weight**2
     
     weight_funct = Int_func/integ_weight
     weight_funct_err = ((self.var_Inter1[0]*(Int_sqr+sqr_clap1[0]-2*integ_weight*clap1_int[0])+(sqr_Inter1[0]*err_clap1_int[0]))\
                         +4*N.sum((self.var_Inter1[2:-2:2]*(Int_sqr+sqr_clap1[2:-2:2]-2*integ_weight*clap1_int[2:-2:2])\
                         +(sqr_Inter1[2:-2:2]*err_clap1_int[2:-2:2])))+16*N.sum((self.var_Inter1[1:-1:2]*(Int_sqr\
                         +sqr_clap1[1:-1:2]-2*integ_weight*clap1_int[1:-1:2])+(sqr_Inter1[1:-1:2]*err_clap1_int[1:-1:2])))\
                         +((self.var_Inter1[-1]*(Int_sqr+sqr_clap1[-1]-2*integ_weight*clap1_int[-1])\
                         +(sqr_Inter1[-1]*err_clap1_int[-1]))))*(99.940/1001.)**2/(1892.25*9.*Int_sqr**2)
     # We have to compute the error on that!!!
     return (weight_funct/43.5),weight_funct_err
Example No. 4
def beamfrackernel(kernelx, kernely, length, angle):
    """
    The beam fraction intercepted by a sample, used for calculating footprints.

    Parameters
    ----------
    kernelx: array-like
        x axis for the probability kernel
    kernely: array-like
        probability kernel describing the intensity distribution of the beam
    length: float
        length of the sample
    angle: float
        angle of incidence (degrees)

    Returns
    -------
    fraction: float
        The fraction of the beam intercepted by a sample.
    """
    height_of_sample = length * np.sin(np.radians(angle))
    total = integrate.simps(kernely, kernelx)
    lowlimit = np.where(-height_of_sample / 2. >= kernelx)[0][-1]
    hilimit = np.where(height_of_sample / 2. <= kernelx)[0][0]

    area = integrate.simps(kernely[lowlimit: hilimit + 1],
                           kernelx[lowlimit: hilimit + 1])
    return area / total
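
A minimal way to exercise beamfrackernel, assuming a made-up Gaussian beam profile purely for illustration: the intercepted fraction grows towards 1 as the projected sample height exceeds the beam width.

import numpy as np
from scipy import integrate

kernelx = np.linspace(-5.0, 5.0, 1001)        # mm
kernely = np.exp(-0.5*(kernelx/1.0)**2)       # Gaussian beam, sigma = 1 mm

print(beamfrackernel(kernelx, kernely, length=10.0, angle=5.0))    # ~0.34
print(beamfrackernel(kernelx, kernely, length=100.0, angle=5.0))   # ~1.0
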
Example No. 5
def print_model_results(model_name, means, sigmas, header=True):

    med_mean = np.median(means)
    n_data = float(np.size(means))

    grids = model_grids(model_name)
    like = likelihood(means, sigmas, med_mean, *grids[0:6])
    posterior, evidence = posterior_evidence(like, *grids[6:8])
    AIC, AICc, BIC = info_criteria(like, n_data)
    n_params = like.ndim

    # marginalise the posterior down to the parameter of interest
    marg_post = 1.0*posterior
    while marg_post.ndim != 1:
        marg_post = integrate.simps(marg_post)

    # ld: 1D grid of the marginalised parameter (assumed available in the enclosing scope)
    cdf = np.zeros_like(marg_post)
    for j in range(1, cdf.size):
        cdf[j] = integrate.simps(marg_post[0:j+1], ld[0:j+1])
    interp_from_cdf = interp1d(cdf, ld)
    cdf_prctls = (interp_from_cdf(0.5), interp_from_cdf(0.84)-interp_from_cdf(0.5), interp_from_cdf(0.5)-interp_from_cdf(0.16))

    if header:
        print('model (M_k) \t N_free \t P(mu0 | D, M_k) \t log evidence \t AIC \t AICc \t BIC')
    print(model_name + '\t %1i \t %.2f^{+%.2f}_{-%.2f} \t %.2f \t %.2f \t %.2f \t %.2f' %
          (n_params, cdf_prctls[0], cdf_prctls[1], cdf_prctls[2],
           np.log10(evidence), AIC, AICc, BIC))

    return
Example No. 6
def cos(x,frequency,phase,amplitude,offset,xlo,xhi,efficiency=None,num_int_points=100,subranges=None):

    xnorm = np.linspace(xlo,xhi,num_int_points)
    ynorm = offset + amplitude*np.cos(frequency*xnorm + phase)

    if efficiency is not None:
        ynorm *= efficiency(xnorm)

    normalization = integrate.simps(ynorm,x=xnorm)

    # Subranges of the normalization.
    if subranges is not None:
        normalization = 0.0
        for sr in subranges:
            xnorm = np.linspace(sr[0],sr[1],num_int_points)
            #ynorm = np.exp(-slope*xnorm)
            ynorm = offset + amplitude*np.cos(frequency*xnorm + phase)

            if efficiency is not None:
                ynorm *= efficiency(xnorm)

            normalization += integrate.simps(ynorm,x=xnorm)
            #print "building normalization: ", normalization

    #y = np.exp(-slope*x)/normalization
    y = offset + amplitude*np.cos(frequency*x + phase)

    if efficiency is not None:
        y *= efficiency(x)

    return y/normalization
Example No. 7
def exp(x,slope,xlo,xhi,efficiency=None,num_int_points=100,subranges=None):

    xnorm = np.linspace(xlo,xhi,num_int_points)
    ynorm = np.exp(-slope*xnorm)

    if efficiency is not None:
        ynorm *= efficiency(xnorm)

    normalization = integrate.simps(ynorm,x=xnorm)

    # Subranges of the normalization.
    if subranges is not None:
        normalization = 0.0
        for sr in subranges:
            xnorm = np.linspace(sr[0],sr[1],num_int_points)
            ynorm = np.exp(-slope*xnorm)

            if efficiency is not None:
                ynorm *= efficiency(xnorm)

            normalization += integrate.simps(ynorm,x=xnorm)

    y = np.exp(-slope*x)/normalization

    #'''
    if efficiency is not None:
        y *= efficiency(x)
    #'''

    return y
Example No. 8
    def __init__(self, centre, width, steps=200):
        """

        :type steps: int
        """
        super(TophatFilter, self).__init__()
        self.centre = centre * u.angstrom
        self.width = width * u.angstrom
        self.steps = steps

        upper, lower = self.centre + self.width, self.centre - self.width
        resp_upper, resp_lower = self.centre + (self.width * 0.5), self.centre - (self.width * 0.5)

        self.wave = np.linspace(lower, upper, steps)
        self.response = np.zeros_like(self.wave.value)

        tophat = (self.wave >= resp_lower) * (self.wave < resp_upper)
        self.response[tophat] = 1

        self.freq = (c.c / self.wave).to(u.Hz)

        self.lambda_c = (simps(self.wave * self.response, self.wave) /
                         simps(self.response, self.wave))

        self.nu_c = (simps(self.freq * self.response, self.freq) /
                     simps(self.response, self.freq))
        self.fwhm = self.width
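
The effective wavelength computed above is just the response-weighted mean, lambda_c = int(lambda R dlambda) / int(R dlambda); for a symmetric top-hat it reproduces the centre, as in this small standalone check (plain NumPy arrays, without the astropy units used by the class):

import numpy as np
from scipy.integrate import simps

wave = np.linspace(4000.0, 6000.0, 2001)                       # Angstrom
response = np.where(np.abs(wave - 5000.0) <= 250.0, 1.0, 0.0)  # top-hat centred at 5000 A

lambda_c = simps(wave*response, wave) / simps(response, wave)
print(lambda_c)   # ~5000
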
Example No. 9
def calculateViolation(predictLine, allocateline, startTime ,endTime):
    stepSize = 1
    violateArea = 0
    violateTime = 0
    area_violations = []
    time_violations  = []
    for i in  drange(startTime + stepSize, endTime, stepSize):
        predicted_i0 = getValue(predictLine,i - stepSize)
        predicted_i1 = getValue(predictLine,i)
        ##print("Predicty=  i0 :%s i1:%s" %(predicted_i0,predicted_i1))
        allocated_i0 = getValue(allocateline, i - stepSize)
        allocated_i1 = getValue(allocateline,i)
        area_under_predicted  = simps(y = [predicted_i0, predicted_i1] , dx = stepSize)
        area_under_allocated  = simps(y = [allocated_i0, allocated_i1] , dx = stepSize)
        ##print("Areas : %s, %s"  %(area_under_predicted,area_under_allocated))
        if area_under_allocated < area_under_predicted:
            violateArea += (area_under_predicted -area_under_allocated)
            violateTime += stepSize
        area_violations.append(violateArea)
        time_violations.append(violateTime)

    area = np.array(area_violations)
    time = np.array(time_violations)
    xvalue =  np.arange(startTime+stepSize, endTime, stepSize)
    f , (plt1, plt2) = plt.subplots(1,2, sharex= True)
    plt1.plot(xvalue,area)
    plt2.plot(xvalue,time)
    #plt.show()
    #print("ViolateArea : %s ViolateTime : %s" %(violateArea, violateTime))
    return  violateArea, violateTime
Example No. 10
 def fit_single_line_BS(self, x, y, zero_lev, err_continuum, fitting_parameters, bootstrap_iterations = 1000):
     
     #Declare parameters and containers for the fit
     params_dict     = fitting_parameters.valuesdict()
     initial_values  = [params_dict['A0'], params_dict['mu0'], params_dict['sigma0']]
     area_array      = empty(bootstrap_iterations)
     params_array    = empty([3, bootstrap_iterations])
     n_points        = len(x)
                         
     #Perform the fit
     for i in range(bootstrap_iterations):
         y_new               = y + np_normal_dist(0, err_continuum, n_points)
         area_array[i]       = simps(y_new, x) - simps(zero_lev, x)
         best_vals, covar    = curve_fit(gaussian_curveBS, (x, zero_lev), y_new, p0=initial_values, maxfev = 1600)
         params_array[:,i]   = best_vals
                 
     #Compute Bootstrap output
     mean_area, std_area = mean(area_array), std(area_array)
     mean_params_array, stdev_params_array = params_array.mean(1), params_array.std(1)
     
     #Store the data
     self.fit_dict['area_intg'],     self.fit_dict['area_intg_er']   = mean_area, std_area
     self.fit_dict['A0_norm'],       self.fit_dict['A0_norm_er']     = mean_params_array[0], stdev_params_array[0]
     self.fit_dict['mu0_norm'],      self.fit_dict['mu0_norm_er']    = mean_params_array[1], stdev_params_array[1]  
     self.fit_dict['sigma0_norm'],   self.fit_dict['sigma0_norm_er'] = mean_params_array[2], stdev_params_array[2]  
             
     A = ufloat(mean_params_array[0], stdev_params_array[0]) 
     sigma = ufloat(mean_params_array[2], stdev_params_array[2])         
     fwhm0_norm = 2.354820045 * sigma
     areaG0_norm = A * sigma * self.sqrt2pi
   
     self.fit_dict['fwhm0_norm'], self.fit_dict['fwhm0_norm_er'] = fwhm0_norm.nominal_value, fwhm0_norm.std_dev
     self.fit_dict['area_G0_norm'], self.fit_dict['area_G0_norm_er'] = areaG0_norm.nominal_value, areaG0_norm.std_dev
                        
     return
Example No. 11
    def __init__(self, filepath, maxbins=1000):
        """

        :type filepath: string
        :type maxbins: int
        """
        super(FileFilter, self).__init__()
        self.path = filepath

        data = np.loadtxt(self.path)
        wf = data[:, 0]
        tp = data[:, 1]
        if len(data[:, 0]) < maxbins:  # Re-sample coarsely sampled filters onto a finer grid
            wfx = np.linspace(wf[0], wf[-1], maxbins)
            tpx = griddata(wf, tp, wfx)

            wf = wfx
            tp = tpx

        self.wave = wf * u.angstrom
        self.response = tp

        self.freq = (c.c / self.wave).to(u.Hz)

        nmax = np.argmax(self.response)
        halfmax_low = self.wave[:nmax][np.argmin(np.abs(self.response[nmax] - 2 * self.response[:nmax]))]
        halfmax_hi = self.wave[nmax:][np.argmin(np.abs(self.response[nmax] - 2 * self.response[nmax:]))]

        self.fwhm = halfmax_hi - halfmax_low

        self.lambda_c = (simps(self.wave * self.response, self.wave) /
                         simps(self.response, self.wave)) * u.angstrom

        self.nu_c = (simps(self.freq * self.response, self.freq) /
                     simps(self.response, self.freq)) * u.Hz
Example No. 12
def KramersKronig(f, re, im, usezero=False):
    """Return real/imaginary parts retrieved by Kramers-Kronig relations.

    formulas including singularity removal according to Boukamp (1993)
    """
    from scipy.integrate import simps

    x = f * 2. * pi
    im2 = np.zeros(im.shape)
    re2 = np.zeros(im.shape)
    re3 = np.zeros(im.shape)
    drdx = np.diff(re) / np.diff(x)
    dredx = np.hstack((drdx[0], (drdx[:-1] + drdx[1:]) / 2, drdx[-1]))
    didx = np.diff(im) / np.diff(x)
    dimdx = np.hstack((didx[0], (didx[:-1] + didx[1:]) / 2, didx[-1]))
    for num, w in enumerate(x):
        x2w2 = x**2 - w**2
        x2w2[num] = 1e-12
        fun1 = (re - re[num]) / x2w2
        fun1[num] = dredx[num] / 2 / w
        im2[num] = -simps(fun1, x) * 2. * w / pi
        fun2 = (im * w / x - im[num]) / x2w2
        re2[num] = simps(fun2, x) * 2. * w / pi + re[0]
        fun3 = (im * x - im[num] * w) / x2w2
        fun3[num] = (im[num] / w + dimdx[num]) / 2
        re3[num] = simps(fun3, x) * 2. / pi + re[-1]

    if usezero:
        re3 = re2

    return re3, im2
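
One way to sanity-check the routine (a usage sketch, not part of the original code, and assuming the module-level numpy/pi imports the snippet relies on): a single Debye relaxation 1/(1 + i*omega*tau) is an exact Kramers-Kronig pair, so the reconstructed parts should track the inputs on a dense frequency grid, up to discretization and truncation error.

import numpy as np

tau = 1e-3
f = np.logspace(-1, 5, 400)          # Hz, well bracketing the relaxation near 159 Hz
wt = 2*np.pi*f*tau
re = 1.0/(1.0 + wt**2)               # real part of 1/(1 + i*omega*tau)
im = -wt/(1.0 + wt**2)               # imaginary part

re_kk, im_kk = KramersKronig(f, re, im)
print(np.max(np.abs(im_kk - im)), np.max(np.abs(re_kk - re)))   # both should be small
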
Example No. 13
    def fit_single_line(self, x, y, zero_lev, err_continuum, fitting_parameters, bootstrap_iterations = 1000):

        #Simple fit
        if self.fit_dict['MC_iterations'] == 1:
            fit_output = lmfit_minimize(residual_gauss, fitting_parameters, args=(x, y, zero_lev, err_continuum))
            self.fit_dict['area_intg'] = simps(y, x) - simps(zero_lev, x)
            self.fit_dict['area_intg_err'] = 0.0
             
        #Bootstrap
        else:
            mini_posterior  = Minimizer(lnprob_gaussCurve, fitting_parameters, fcn_args = ([x, y, zero_lev, err_continuum]))
            fit_output      = mini_posterior.emcee(steps=200, params = fitting_parameters)
            
            #Bootstrap for the area of the lines
            area_array = empty(bootstrap_iterations) 
            len_x_array = len(x)
            for i in range(bootstrap_iterations):
                y_new =  y + np_normal_dist(0.0, err_continuum, len_x_array)
                area_array[i] = simps(y_new, x) - simps(zero_lev, x)
            self.fit_dict['area_intg'] = mean(area_array)
            self.fit_dict['area_intg_err'] = std(area_array)           
        
        #Store the fitting parameters
        output_params = fit_output.params
        for key in self.fit_dict['parameters_list']:
            self.fit_dict[key + '_norm'] = output_params[key].value
            self.fit_dict[key + '_norm_er'] = output_params[key].stderr
            
        return
Example No. 14
def int_func(**kwargs):
    if len(kwargs) == 1:
        f = kwargs['xin']
        x = numpy.arange(numpy.size(f))*1.
    else:
        f = kwargs['fin']
        x = kwargs['xin']

    n = numpy.size(f)

    g = numpy.zeros(n)

    if kwargs.get('simple') is not None:
        # Just use trapezium rule
        g[0] = 0.0
        for i in range(1, n):
            g[i] = g[i-1] + 0.5*(x[i] - x[i-1])*(f[i] + f[i-1])

    else:
        n2 = int(old_div(n, 2))

        g[0] = 0.0
        for i in range(n2, n):
            g[i] = integrate.simps(f[0:i+1], x[0:i+1])

        for i in range(1, n2):
            g[i] = g[n-1] - integrate.simps(f[i:], x[i:])

    return g
Example No. 15
def sfq0(rdfX,rdfY,ndens,Lmax=20.0,qbins=1024,damped=None):
    minq,maxq,dq=0,Lmax,Lmax/qbins
    qs=[i*dq+minq for i in range(qbins)]
    qs[0]=1E-10

    rdfY=np.array(rdfY)
    rdfX=np.array(rdfX)
    dx = rdfX[1]-rdfX[0]

    #Extend the h(r) to get a better estimate near q0
    cr,grExtx,grExty = rdfExtend(rdfX,rdfY,ndens,rmax=50.0,Niter=25,T=1000.0,rm=2.5,eps=-1,damped=0.1)

    sf1=list()
    for i,q in enumerate(qs):
        sf1 += [1 + 4*pi*ndens * integrate.simps((rdfY-1.0)*np.sin(q*rdfX)*rdfX/q ,dx=dx)]

    import pylab as pl
    
    sf2=list()
    for i,q in enumerate(qs):
        sf2 += [1 + 4*pi*ndens * integrate.simps((grExty-1.0)*np.sin(q*grExtx)*grExtx/q ,dx=dx)]
    #        R=1.0/q
    #        alpha = np.array([(1-rij/2.0/R)**2 * (1+rij/4.0/R) if rij<2*R else 0 for rij in rdfX])
    #        sf2 += [1 + 4*pi*ndens * integrate.simps((rdfY-1.0)*np.sin(q*rdfX)*rdfX/q*alpha,dx=dx)]

    f=open("/home/acadien/Dropbox/sfq0.dat","w")
    a=map(lambda x: "%f %f\n"%(x[0],x[1]),zip(qs,sf2))
    f.writelines(a)
#    exit(0)
    pl.plot(qs,sf1)
    pl.plot(qs,sf2)
    pl.show()
    exit(0)
Example No. 16
    def get_dband_center(self, d_cols):
        """
        Get d-band center of the DosX object.

        Parameters:
        -----------
        d_cols: The column number range for d orbitals, tuple of int.

        Examples:
        ---------
        # The 5 - 9 columns are state density for d orbitals.
        >>> dos.get_dband_center(d_cols=(5, 10))
        """

        # Merge the DOS of the d orbitals
        start, end = d_cols
        yd = np.sum(self.data[:, start:end], axis=1)

        # Find the index of the Fermi level
        for idx, E in enumerate(self.data[:, 0]):
            if E >= 0:
                nfermi = idx
                break
        E = self.data[: nfermi+1, 0]  # negative inf to Fermi
        dos = yd[: nfermi+1]          # y values from negative inf to Fermi
        # Use Simpson integration to get d-electron number
        nelectro = simps(dos, E)
        # Get total energy of dband
        tot_E = simps(E*dos, E)
        dband_center = tot_E/nelectro
        self.dband_center = dband_center

        return dband_center
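
The d-band centre is the first moment of the d-projected DOS below the Fermi level, eps_d = int(E n_d(E) dE) / int(n_d(E) dE); a self-contained illustration with a made-up Gaussian DOS (not the DosX object above):

import numpy as np
from scipy.integrate import simps

E = np.linspace(-10.0, 0.0, 1001)        # energies up to the Fermi level (eV)
dos = np.exp(-0.5*(E + 3.0)**2)          # synthetic d-DOS centred at -3 eV

dband_center = simps(E*dos, E) / simps(dos, E)
print(dband_center)   # ~-3.0
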
Example No. 17
def int_func( xin, fin=None, simple=None):
    if fin is None :
        f = copy.deepcopy(xin)
        x = numpy.arange(numpy.size(f)).astype(float)
    else:
        f = copy.deepcopy(fin)
        x = copy.deepcopy(xin)
    
    n = numpy.size(f)

    g = numpy.zeros(n)

    if simple is not None :
     # Just use trapezium rule
     
        g[0] = 0.0
        for i in range (1, n) :
            g[i] = g[i-1] + 0.5*(x[i] - x[i-1])*(f[i] + f[i-1])
         
    else:
     
        n2 = int(old_div(n, 2))
     
        g[0] = 0.0
        for i in range (n2, n) :
            g[i] = simps( f[0:i+1], x[0:i+1])


            
        for i in range (1, n2) :
            g[i] = g[n-1] - simps( f[i::], x[i::])
             
    return g
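
A quick check of int_func against an analytic antiderivative (assuming the snippet's own imports of numpy, copy, old_div and simps are in place): the cumulative integral of cos(x) should reproduce sin(x) up to discretization error.

import numpy as np

x = np.linspace(0.0, 2*np.pi, 201)
g = int_func(x, np.cos(x))               # cumulative integral of cos from 0 to x
print(np.max(np.abs(g - np.sin(x))))     # small
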
Example No. 18
def radiation_integrals(lattice, twiss_0, nsuperperiod = 1):
    #TODO: add I4 for rectangular magnets I4 = Integrate(2 Dx(z)*k(z)*h(z), Z)
    
    n_points_element = 20
    
    tws_elem = twiss_0
    (I1, I2, I3,I4, I5) = (0., 0., 0., 0., 0.)
    h = 0.
    for elem in lattice.sequence:
        if elem.__class__ in (SBend, RBend, Bend) and elem.l != 0:
            Dx = []
            Hinvariant = []
            Z = []
            h = elem.angle/elem.l

            for z in linspace(0, elem.l,num = n_points_element, endpoint=True):
                tws_z = elem.transfer_map(z)*tws_elem
                Dx.append(tws_z.Dx)
                Z.append(z)
                Hx = (tws_z.gamma_x*tws_z.Dx*tws_z.Dx + 2.*tws_z.alpha_x*tws_z.Dxp*tws_z.Dx
                                        + tws_z.beta_x*tws_z.Dxp*tws_z.Dxp)
                Hinvariant.append(Hx)
            #H = array(h)
            H2 = h*h
            H3 = abs(h*h*h)
            I1 += h*simps(array(Dx), Z)
            I2 += H2*elem.l  #simps(H2, Z)*nsuperperiod
            I3 += H3*elem.l  #simps(H3, Z)*nsuperperiod
            I4 += h*(2*elem.k1 + H2)*simps(array(Dx), Z)
            I5 += H3*simps(array(Hinvariant), Z)
        tws_elem = elem.transfer_map*tws_elem
    #if abs(tws_elem.beta_x - twiss_0.beta_x)>1e-7 or abs(tws_elem.beta_y - twiss_0.beta_y)>1e-7:
    #    print( "WARNING! Results may be wrong! radiation_integral() -> beta functions are not matching. ")
        #return None
    return (I1*nsuperperiod,I2*nsuperperiod,I3*nsuperperiod, I4*nsuperperiod, I5*nsuperperiod)
Example No. 19
def nnf(func,normrange=(0,10),data=None,num_int_points=100,verbose=False,subnormranges=None):

    xnorm = np.linspace(normrange[0],normrange[1],num_int_points)

    # Subranges of the normalization.
    normalization = 1.0
    if subnormranges is not None:
        xnormtot = []
        normalization = 0.0
        for sr in subnormranges:
            xnorm = np.linspace(sr[0],sr[1],num_int_points)
            ynorm = func.pdf(xnorm)

            normalization += integrate.simps(ynorm,x=xnorm)
            xnormtot += xnorm.tolist()
        xnorm = np.array(xnormtot)
    else:
        normalization = integrate.simps(func.pdf(xnorm),x=xnorm)

    if verbose:
        print("normalization: {0}".format(normalization))

    if data is None:
        return func.pdf(xnorm)/normalization,xnorm
    else:
        return func.pdf(data)/normalization,xnorm
Example No. 20
    def Matsubara(self, iom):
        """ evaluate self-energy at matsubara axis
        """
        gm = 1/self.a2
        (x0, dh0) = swing_make_mesh(500, 1e-5*gm, 300*gm, gm)
        F0 = array([self.Fun(x) for x in x0])
        F00 = self.Fun(0.0)
        weigh0 = abs(self.expan_i[0]) / (pi*F00)

        datai=zeros(len(x0),dtype=float)
        datar=zeros(len(x0),dtype=float)
        F0i=[]
        F0r=[]
        for n in range(len(iom)):
            omn = iom[n]
            if (omn<0.3): subtract=1
            else: subtract=0
            for i in range(len(F0)):
                datai[i] = (F0[i]-F00*subtract)/(omn**2+x0[i]**2)
                datar[i] = F0[i]*x0[i]/(omn**2+x0[i]**2)
            wi = -(omn*integrate.simps(datai, x0) + F00*pi*subtract)
            wr = -integrate.simps(datar, x0)
            F0i.append(wi)
            F0r.append(wr)
            
        return (array(F0r), array(F0i), weigh0)
Example No. 21
    def filtcheck(self, bandpass, z, frac=0.75, survey="Euclid", f_index=-1):
        """
        check if the redshifted effective wavelength is redder than the effective
        wavelength of the reddest filter (yes, it's a complicated sentence)
        Input is a bandpass (as a string) and redshift
        
        """
        bp_rest = sncosmo.get_bandpass(bandpass)
        if survey == "Euclid":
            effwave = self.effwave_arr
            filtarr = self.filtarr
            
        elif survey == "LSST":
            effwave = self.lsst_effwave_arr
            filtarr = self.lsst_filtarr
            
        if bp_rest.wave_eff*(1+z)  > effwave[f_index]:
            filtfun=filtarr[effwave==effwave[f_index]][0]

            #condition: check what fraction of the redshifted filter is inside the observed band
            cond = bp_rest.wave*(1+z) < max(filtfun.wave[filtfun.trans > 1e-4])
            
            simp_prod = simps(bp_rest.trans, bp_rest.wave)
            if len(bp_rest.wave[cond])>10:
                simp_prod_cond = simps(bp_rest.trans[cond],bp_rest.wave[cond])
            else:
                simp_prod_cond=0
            if simp_prod_cond/simp_prod > frac:
                return 1
            else:
                print "rest-frame filter is too red for observation"
                return 0
        else:        
            return 1
Example No. 22
def _prob_I(v, x_t):
    if v == 1:
        if tuple(x_t) in prob_I_cache:
            return prob_I_cache.get(tuple(x_t))
        else:
            sigma = 4 # todo change this

            delta = 2
            span = sigma*4
            x_range = range(1, span, delta)
            k = len(x_range)
            matx = np.zeros((k, k))
            for i in x_range:
                for j in x_range:
                    i_f = x_t[0] - (span/2) + i
                    j_f = x_t[1] - (span/2) + j

                    if (np.array([i_f, j_f]) < 0).any() or (np.array([i_f, j_f]) >= 512).any():
                        matx[i//delta, j//delta] = 0
                    else:
                        matx[i//delta, j//delta] = f([i_f, j_f], x_t, sigma)

            first = scint.simps(matx, x_range, axis=0)
            _sum = scint.simps(first, x_range)
            val = 1-_sum
            prob_I_cache[tuple(x_t)] = val
            return val
    else:
        return 0
Example No. 23
    def __init__(self,filepath,minbins=200):
        self.path = filepath
        
#        try:
        data = numpy.loadtxt(self.path)
        wf = data[:,0]
        tp = data[:,1]
        if len(data[:,0]) < minbins: #Re-sample coarsely sampled filters onto a finer grid
            wfx = numpy.linspace(wf[0],wf[-1],minbins)
            tpx = griddata(wf,tp,wfx)

            wf = wfx
            tp = tpx
            
        self.wave = wf * U.angstrom
        self.response = tp
        
        self.freq = (C.c/self.wave).to(U.Hz)
        
        nmax = numpy.argmax(self.response)
        halfmax_low = self.wave[:nmax][numpy.argmin(numpy.abs(self.response[nmax] - 2*self.response[:nmax]))]
        halfmax_hi = self.wave[nmax:][numpy.argmin(numpy.abs(self.response[nmax] - 2*self.response[nmax:]))]
        print(self.wave[nmax], halfmax_low, halfmax_hi)
        self.fwhm = halfmax_hi-halfmax_low
        
        self.lambda_c = (simps(self.wave*self.response,self.wave) / 
                         simps(self.response,self.wave))

        self.nu_c = (simps(self.freq*self.response,self.freq) / 
                     simps(self.response,self.freq))
Example No. 24
def synthesize_photometric_point(filt, wl, fl):
    import photometry
    '''Takes in a spectrum in [ang], and [erg/(s cm^2 ang)], and then returns Jy'''
    if filt in {"u","g","r","i","z"}:
        wl_key = "lam"
        p_key = "air1.0"
    else:
        wl_key = "WL"
        p_key = "RES"
    p = interp1d(photometry.responses[filt][wl_key],photometry.responses[filt][p_key])
    filt_re = photometry.responses[filt][wl_key]
    p_min = min(filt_re)
    p_max = max(filt_re)
    if p_min < wl[0] or p_max > wl[-1]:
        print("ERROR, spectrum is out of range of filter: %s" % filt)
    def px(x):
        if (x < p_min) or (x > p_max):
            return 0
        else:
            return p(x)
    pxs = list(map(px, wl))  # materialise so the array arithmetic below works
    f_num = wl * pxs * fl
    f_denom = wl * pxs
    #print f_num, f_denom
    num = simps(f_num, x=wl)
    denom = simps(f_denom, x=wl)
    flux = num/denom
    #This should be in spectral flux [erg/(s cm^2 A)]
    central = denom/simps(pxs, wl)
    #Returned product is in Jy  
    return photometry.Flamb_to_Jy(flux,central)
Example No. 25
def Compute_Delta(niom, T, mu, Sig):
  # Matsubara Frequencies
  iom = pi*T*(2*arange(niom)+1)
  # Load DOS
  DOSfile = loadtxt('2D_SL_DOS')
  # 1st column as energies
  ommesh = DOSfile[:,0]
  # 2nd column as DOS
  DOS = DOSfile[:,1]
  # Normalize
  DOS = DOS / integrate.simps(DOS, ommesh)
  # Local Green function
  Gloc = zeros(niom, dtype=complex)
  for i in range(niom):
    Re = mu - ommesh - Sig[i].real
    Im = iom[i] - Sig[i].imag
    denom = 1/(Re**2 + Im**2)
    ReInt = DOS*Re*denom
    ImInt = DOS*Im*denom
    Gloc[i] = integrate.simps(ReInt, ommesh) - 1j*integrate.simps(ImInt, ommesh)

  Delta = 1j*iom+mu-Sig-1./Gloc
  with open('Delta.inp', 'w') as f:
    for i in range(niom):
      f.write('%.8f\t%.8f\t%.8f\n'%(iom[i], Delta[i].real, Delta[i].imag))
Example No. 26
def process_fid(fid, absint=False):
    ft = cut_fft(do_fft(cut_fid(fid)))
    if absint:
        res = integrate.simps(np.abs(ft['fft']), ft.index)
    else:
        res = integrate.simps(np.real(ft['fft']), ft.index)
    return res
Example No. 27
def cosine_content(pca_space, i):
    """Measure the cosine content of the PCA projection.

    The cosine content of PCA projections can be used as an indicator of
    whether a simulation is converged. Values close to 1 are an indicator that the
    simulation isn't converged. For values below 0.7 no statement can be made.
    If you use this function please cite [BerkHess1]_.


    Parameters
    ----------
    pca_space: array, shape (number of frames, number of components)
        The PCA space to be analyzed.
    i: int
        The index of the PCA component projection to be analyzed.

    Returns
    -------
    A float reflecting the cosine content of the ith projection in the PCA
    space. The output is bounded by 0 and 1, with 1 reflecting an agreement
    with cosine while 0 reflects complete disagreement.

    References
    ----------
    .. [BerkHess1] Berk Hess. Convergence of sampling in protein simulations.
                   Phys. Rev. E 65, 031910 (2002).
    """
    from scipy.integrate import simps
    t = np.arange(len(pca_space))
    T = len(pca_space)
    cos = np.cos(np.pi * t * (i + 1) / T)
    return ((2.0 / T) * (simps(cos*pca_space[:, i])) ** 2 /
            simps(pca_space[:, i] ** 2))
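
Usage sketch (only the array shape matters here): a projection that is itself a half-period cosine gives a cosine content near 1, while random noise gives a value much closer to 0.

import numpy as np

T = 1000
t = np.arange(T)
pca_space = np.column_stack([np.cos(np.pi*t/T),                        # drift-like projection
                             np.random.default_rng(0).normal(size=T)]) # noise-like projection
print(cosine_content(pca_space, 0))   # ~1
print(cosine_content(pca_space, 1))   # much smaller
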
Example No. 28
def SusceptibilityHF(U,GF_A,X_A):
	''' susceptibility calculated from the full spectral self-energy derivative '''
	Int1_A = FD_A*sp.imag(GF_A**2*(1.0-U*X_A))
	Int2_A = FD_A*sp.imag(GF_A**2*X_A)
	I1 = simps(Int1_A,En_A)/sp.pi
	I2 = simps(Int2_A,En_A)/sp.pi
	return 2.0*I1/(1.0+U**2*I2)
Example No. 29
def test_simpson():
    ncalls = 10
    func = lambda x: np.sin(x - 0.2414)*x + 2.

    x = np.linspace(0, 10, 250001)
    y = func(x)

    t0 = time.time()
    for i in range(ncalls):
        s1 = simpson(y, dx=x[1]-x[0])
    print("cython (odd): {0} sec for {1} calls".format(time.time() - t0,ncalls))

    t0 = time.time()
    for i in range(ncalls):
        s2 = simps(y, x=x)
    print("python (odd): {0} sec for {1} calls".format(time.time() - t0,ncalls))
    np.testing.assert_allclose(s1, s2)

    # -----------------------------------------------------
    print()
    x = np.linspace(0, 10, 250000)
    y = func(x)
    t0 = time.time()
    for i in range(ncalls):
        s1 = simpson(y, dx=x[1]-x[0])
    print("cython (even): {0} sec for {1} calls".format(time.time() - t0,ncalls))

    t0 = time.time()
    for i in range(ncalls):
        s2 = simps(y, x=x)
    print("python (even): {0} sec for {1} calls".format(time.time() - t0,ncalls))

    np.testing.assert_allclose(s1, s2)
Example No. 30
def calcQ(lamin0, specin0, mstar=1.0, helium=False, f_nu=False):
    '''
    Calculate the number of Lyman ionizing photons for a given spectrum
    Input spectrum must be in ergs/s/A!!
    Q = int(Lnu/hnu dnu, nu_0, inf)
    '''
    lamin = np.asarray(lamin0)
    specin = np.asarray(specin0)
    c = 2.9979e18 #ang/s
    h = 6.626e-27 #erg s (Planck constant)
    if helium:
        lam_0 = 304.0
    else:
        lam_0 = 911.6
    if f_nu:
        nu_0 = c/lam_0
        inds, = np.where(c/lamin >= nu_0)
        hlam, hflu = c/lamin[inds], specin[inds]
        nu = hlam[::-1]
        f_nu = hflu[::-1]
        integrand = f_nu/(h*nu)
        Q = simps(integrand, x=nu)
    else:
        inds, = np.nonzero(lamin <= lam_0)
        lam = lamin[inds]
        spec = specin[inds]
        integrand = lam*spec/(h*c)
        Q = simps(integrand, x=lam)*mstar
    return Q
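
For a flat F_lambda spectrum of 1 erg/s/A, Q reduces to int(lambda/(h c) dlambda) up to the Lyman limit, which the function reproduces to within the grid resolution (usage sketch, assuming calcQ as defined above):

import numpy as np

lam = np.linspace(100.0, 2000.0, 2000)    # Angstrom
spec = np.ones_like(lam)                  # 1 erg/s/A everywhere

h, c = 6.626e-27, 2.9979e18               # erg*s, Angstrom/s
print(calcQ(lam, spec), (911.6**2 - lam[0]**2)/(2.0*h*c))   # should agree closely
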
Example No. 31
    def line_integral(self, func, method='sum'):
        """
        func = /oint F(x,y) dl
        :param func: self - func(X, Y), Union[ndarray, int, float] or function values or 2D spline
        :param method: str, ['sum', 'trapz', 'simps']
        :return:
        """
        import inspect
        import numpy as np
        from scipy.integrate import trapz, simps, quad

        #
        dx = np.hstack((0, np.cumsum(self.dists)))
        # first evaluate the dimension of coord - self and the function
        if self.grid:
            raise TypeError(
                'The grid is used - currently not possible to calculated the line average value from grid'
            )

        if self.dim == 1:
            if method == 'sum':
                x1 = (self.x1[1:] + self.x1[:-1]) / 2  # midpoints, matching the 2D branch below
            else:
                x1 = self.x1

            if inspect.isclass(func) or inspect.isfunction(func):
                func_val = func(x1)
            elif isinstance(func, float) or isinstance(func, int):
                func_val = func
            elif inspect.ismodule(inspect.getmodule(func)):
                func_val = func(x1)
            else:
                if method == 'sum':
                    func_val = (func[1:] + func[:-1]) / 2
                else:
                    func_val = func

            if method == 'sum':
                line_integral = np.sum(func_val * self.dists)
            elif method == 'trapz':
                line_integral = trapz(func_val, dx)
            elif method == 'simps':
                line_integral = simps(func_val, dx)
            else:
                line_integral = None

        elif self.dim == 2:
            if method == 'sum':
                x1 = (self.x1[1:] + self.x1[:-1]) / 2
                x2 = (self.x2[1:] + self.x2[:-1]) / 2
            else:
                x1 = self.x1
                x2 = self.x2
            if inspect.isclass(func) or inspect.isfunction(func):
                func_val = func(x1, x2)
            elif isinstance(func, float) or isinstance(func, int):
                func_val = func
            elif inspect.ismodule(inspect.getmodule(func)):
                func_val = func(x1, x2)
            else:
                if method == 'sum':
                    if func.ndim == 1:
                        func_val = (func[1:] + func[:-1]) / 2
                    else:
                        func_val = (func[1:, 1:] + func[:-1, :-1]) / 2
                else:
                    func_val = func

            if method == 'sum':
                line_integral = np.sum(func_val * self.dists)
            elif method == 'trapz':
                if func_val.ndim == 1:
                    line_integral = trapz(func_val, dx)
                else:
                    line_integral = trapz(trapz(func_val, x1), x2)
            elif method == 'simps':
                if func_val.ndim == 1:
                    line_integral = simps(func_val, dx)
                else:
                    line_integral = simps(simps(func_val, x1), x2)
            else:
                line_integral = None

        elif self.dim == 3:
            raise TypeError(
                'The 3D function was given - line averaged value needs 2D')

        return line_integral
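
Outside the class, the 'simps' branch amounts to integrating the sampled integrand against the cumulative arc length; a bare-bones sketch of that idea with a hypothetical straight-line path (not the original coordinate object):

import numpy as np
from scipy.integrate import simps

# straight path from (0, 0) to (3, 4), sampled at 101 points
x = np.linspace(0.0, 3.0, 101)
y = np.linspace(0.0, 4.0, 101)
dl = np.hstack((0.0, np.cumsum(np.hypot(np.diff(x), np.diff(y)))))   # cumulative arc length

func_val = x + y                       # F(x, y) sampled along the path
print(simps(func_val, dl))             # = 17.5 here (path length 5, mean value 3.5)
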
Example No. 32
def noise_averaging(x, noise_weights, cj_array):
    from scipy.integrate import simps
    norm = simps(noise_weights, x)
    matrix_int = simps(np.multiply(cj_array, noise_weights), x)
    return matrix_int / norm
Example No. 33
x_totalrot = np.zeros(num_samples)
y_totalrot = np.zeros(num_samples)
z_totalrot = np.zeros(num_samples)
x_peakvel = np.zeros(num_samples)
y_peakvel = np.zeros(num_samples)
z_peakvel = np.zeros(num_samples)
x_peakrot = np.zeros(num_samples)
y_peakrot = np.zeros(num_samples)
z_peakrot = np.zeros(num_samples)

classlabels = np.zeros(num_samples, dtype=int)

for i in range(num_samples):
    x_velocity = cumtrapz(movement_rawdata_collected[i]["x acceleration"][:])
    x_peakvel[i] = abs(max(x_velocity, key=abs))
    x_totaldisp[i] = simps(x_velocity)
    y_velocity = cumtrapz(movement_rawdata_collected[i]["y acceleration"][:])
    y_peakvel[i] = abs(max(y_velocity, key=abs))
    y_totaldisp[i] = simps(y_velocity)
    z_velocity = cumtrapz(movement_rawdata_collected[i]["z acceleration"][:])
    z_peakvel[i] = abs(max(z_velocity, key=abs))
    z_totaldisp[i] = simps(z_velocity)

    x_rot_velocity = cumtrapz(movement_rawdata_collected[i]["x gyroscope"][:])
    x_peakrot[i] = abs(max(x_rot_velocity, key=abs))
    x_totalrot[i] = simps(x_rot_velocity)
    y_rot_velocity = cumtrapz(movement_rawdata_collected[i]["y gyroscope"][:])
    y_peakrot[i] = abs(max(y_rot_velocity, key=abs))
    y_totalrot[i] = simps(y_rot_velocity)
    z_rot_velocity = cumtrapz(movement_rawdata_collected[i]["z gyroscope"][:])
    z_peakrot[i] = abs(max(z_rot_velocity, key=abs))
Example No. 34
 def test_pdf_unity_area(self):
     from scipy.integrate import simps
     # PDF should integrate to one
     assert_almost_equal(
         simps(stats.genexpon.pdf(numpy.arange(0, 10, 0.01), 0.5, 0.5, 2.0),
               dx=0.01), 1, 1)
Example No. 35
def analytical_solution(q0,Nph,aval):
    """Analytical solution driver, returns rI, vI"""
    import h5py
    import numpy as np
    import scipy.integrate as sp
    z0, z, xR, t0, H0, dUnit, tUnit, lUnit = get_params('DD0000/data0000')
    f = h5py.File('DD0000/data0000.cpu0000','r')
    rho_data = f.get('/Grid00000001/Density')
    rho = rho_data[0][0][0]*dUnit
    del(rho_data)
    mp = 1.67262171e-24
    # initial nH: no need to scale by a, since a(z0)=1, but we do
    # need to accommodate for Helium in analytical soln
#    nH0 = rho/mp*0.76
    nH0 = rho/mp

    # We first set the parameter lamda = chi_{eff} alpha2 cl n_{H,0} t0, where
    #      chi_{eff} = correction for presence of He atoms [1 -- no correction]
    #      alpha2 = Hydrogen recombination coefficient [2.6e-13 -- case B]
    #      cl = the gas clumping factor [1 -- homogeneous medium]
    #      n_{H,0} = initial Hydrogen number density
    #      t0 = initial time
    alpha2 = 2.52e-13
    lamda = alpha2*nH0*t0
    
    # Compute the initial Stromgren radius, rs0 (proper, CGS units)
    rs0 = (Nph*3.0/4.0/pi/alpha2/nH0/nH0)**(1.0/3.0)  # no rescaling since a(z0)=1
    
    # We have the general formula for y(t):
    #    y(t) = (lamda/xi)exp(-tau(t)) integral_{1}^{a(t)} [da'
    #            exp(t(a'))/sqrt(1-2q0 + 2q0(1+z0)/a')] ,  where
    #    xi = H0*t0*(1+z0),
    #    H0 = Hubble constant
    #    tau(a) = (lamda/xi)*[F(a)-F(1)]/[3(2q0)^2(1+z0)^2/2],
    #    F(a) = [2(1-2q0) - 2q0(1+z0)/a]*sqrt(1-2q0+2q0(1+z0)/a)
    #
    # Here, a' is the variable of integration, not the time-derivative of a.
    F1 = (2.0*(1.0-2.0*q0) - 2.0*q0*(1.0+z0))*sqrt(1.0-2.0*q0+2.0*q0*(1.0+z0))
    xi = H0*t0*(1.0+z0)
    
    # set integration nodes/values (lots)
    inodes = 1000001
    if (aval == 1.0):
        numint = 0.0
    else:
        a = linspace(1,aval,inodes)
        integrand = zeros(inodes, dtype=float)
        arat = divide(2.0*q0*(1.0+z0), a)
        sqa = sqrt(add(1.0-2.0*q0, arat))
        afac = subtract(2*(1-2*q0), arat)
        arg1 = subtract(afac*sqa, F1)
        arg2 = exp(multiply((lamda/xi)/(6*q0*q0*(1+z0)*(1+z0)), arg1))
        integrand = divide(arg2,sqa)
        
        # perform numerical integral via composite Simpson's rule
        numint = sp.simps(integrand, a)
    tauval = (lamda/xi)*((2*(1-2*q0) - 2*q0*(1+z0)/aval)*sqrt(1-2*q0+2*q0*(1+z0)/aval)-F1)/(6*q0*q0*(1+z0)*(1+z0))
    y = lamda/xi*exp(-tauval)*numint;
    
    # extract the current Stromgren radius and velocity
    ythird = sign(y)*abs(y)**(1.0/3.0);
    rI = ythird/aval    # compute ratio rI/rS
    vI = (lamda/3)*aval/ythird*ythird*(1.0-y/aval**3);
    return [rI, vI]
Example No. 36
def _integration1D(m, x, axis_indx):
    a = simps(m, x[axis_indx], axis=axis_indx)
    return a
Example No. 37
def pdf2poe1D(pdf, x):
    poe = np.zeros((len(x), ))
    for i in range(len(x)):
        poe[i] = simps(pdf[i:], x[i:])
    return poe
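
pdf2poe1D turns a sampled PDF into a probability of exceedance (the upper-tail integral); for a standard normal the exceedance at x = 0 is about 0.5 (usage sketch, assuming pdf2poe1D and its simps import are available):

import numpy as np
from scipy.stats import norm

x = np.linspace(-6.0, 6.0, 1201)
poe = pdf2poe1D(norm.pdf(x), x)
print(poe[x.size // 2])   # ~0.5 at x = 0
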
Example No. 38
E_ind = np.zeros((Nz, Nt + N_delay))
E_rad = np.zeros((Nz, Nt + N_delay))
E_elec = np.zeros((Nz, Nt + N_delay))
E_elec2 = np.zeros((Nz, Nt + N_delay))
E_elec_total = np.zeros((Nz, Nt + N_delay))

E1 = np.zeros(Nz)
E2 = np.zeros(Nz)
E3 = np.zeros(Nz)
E4 = np.zeros(Nz)

#
#it.quad
for i in range(0, Ntz):  #for each time t, integrate over z
    E1 = (2 - 3 * ((np.sin(theta))**2)) * Iz[:, i] / (c * R * R)
    E_ind[:, i] = (1 / (2 * eps0 * math.pi)) * it.simps(E1, dx=dz)

    E2 = ((np.sin(theta))**2) * dIz[:, i] / (c * c * R)
    E_rad[:, i] = -(1 / (2 * eps0 * math.pi)) * it.simps(E2, dx=dz)

    E3 = Iz[:, i]
    E_elec[:, i] = (1 / (2 * eps0 * math.pi)) * it.simps(E3, dx=dz)

    E4 = (2 - 3 * ((np.sin(theta))**2)) / (R**3)
    E_elec_total[:, i] = (1 / (2 * eps0 * math.pi)) * it.simps(
        E4, dx=dz) * it.simps(E3, dx=dt)
#
##        for j in range(0,Nz):
##            print('stuck',i,j)
##            E4=(2-3*((np.sin(theta[j]))**2))/(R[j]*R[j]*R[j])
##            E_elec_total[j,i]=(1/(2*eps0*math.pi))*it.simps(E4,dx=dz)
Example No. 39
def SimpsonsScipy(x, y, I):
    integral = simps(y, x)
    e = abs(integral-I)/I
    return integral, e
Example No. 40
def fitter(n):
    #if ( ((float(npix)/float(n))-int(float(npix)/float(n))) == 0):
    #    print( (float(npix)/float(n)),"%")

    i = int(n / side_pix)
    j = int(n % side_pix)
    i += x_i
    j += y_i
    #example near center with signal
    #i=1105
    #j=1061
    #print( "i",i,"j",j,"x_i",x_i)

    if len(cube.shape) > 3:
        signal = cube[0, :, j, i]
    else:
        signal = cube[:, j, i]

    if (DoClip):
        signal[np.where(signal < 0)] = 0.
    if (signal.max() <= 0):
        return [None]
    mask = signal > -0.01 * signal.max()  ##mask for values too negative.
    selected_velocities = velocities[mask]
    selected_signal = signal[mask]
    Amp_init = selected_signal.max() - selected_signal.mean()
    if (Amp_init <= 0):
        return [None]
    #v0_init = selected_velocities[selected_signal==selected_signal.max()]
    v0_init = selected_velocities[np.argmax(
        selected_signal
    )]  # selected_velocities[selected_signal==selected_signal.max()]
    sigma_init = 0.1
    sigma_max = 30.
    ## take the error as the rmsnoise far from the line
    #noise = signal[(velocities<v0_init-1.) | (velocities>v0_init+1.)]
    #rmsnoise = sp.sqrt(sp.mean(noise**2.))

    a1_init = Amp_init
    mu1_init = v0_init
    sigma1_init = sigma_init
    maxslope = Amp_init / np.fabs(
        (np.max(selected_velocities) - np.min(selected_velocities)))
    base_a_init = 0.
    base_b_init = 0.

    if (dv < 0.):
        sys.exit("something is wrong with dv")

    limit_a1 = (0., 5. * Amp_init)  #Bounds for pars
    limit_mu1 = (velocities.min() - 1.0, velocities.max() + 1.0)
    limit_sigma1 = (dv / 2., sigma_max)
    limit_base_a = (-maxslope, maxslope)
    limit_base_b = (0., selected_signal.max())

    fallback_error_a1 = limit_a1[1]
    fallback_error_mu1 = 100.
    fallback_error_sigma1 = 100.
    fallback_error_base_a = maxslope
    fallback_error_base_b = rmsnoise

    if (DGauss):
        # velorange=velocities.min()-1.0, velocities.max()+1.0
        v0_init2 = v0_init
        if (Randomize):
            v0_init2 += 3. * dv * (
                np.random.random() - 0.5
            )  # selected_velocities[selected_signal==selected_signal.max()]

        a2_init = Amp_init
        mu2_init = v0_init2
        sigma2_init = sigma_init

        limit_a2 = (0., 5. * Amp_init)  #Bounds for pars
        limit_mu2 = (velocities.min() - 1.0, velocities.max() + 1.0)
        limit_sigma2 = (dv / 2., sigma_max)

        fallback_error_a2 = limit_a1[1]
        fallback_error_mu2 = 100.
        fallback_error_sigma2 = 100.

        if (CommonSigma):
            if DoBaseline:
                func = lambda x, a1, mu1, sigma1, a2, mu2, base_a, base_b: dgauss_wbase(
                    x, a1, mu1, sigma1, a2, mu2, sigma1, base_a, base_b)
                p0 = np.array([
                    a1_init, mu1_init, sigma1_init, a2_init, mu2_init,
                    base_a_init, base_b_init
                ])
                bounds = (limit_a1, limit_mu1, limit_sigma1, limit_a2,
                          limit_mu2, limit_base_a, limit_base_b)
                fallback_errors = np.array(
                    (fallback_error_a1, fallback_error_mu1,
                     fallback_error_sigma1, fallback_error_a2,
                     fallback_error_mu2, fallback_error_base_a,
                     fallback_error_base_b))
            else:
                func = lambda x, a1, mu1, sigma1, a2, mu2: dgauss_wbase(
                    x, a1, mu1, sigma1, a2, mu2, base_a_init, base_b_init)
                p0 = np.array(
                    [a1_init, mu1_init, sigma1_init, a2_init, mu2_init])
                bounds = (limit_a1, limit_mu1, limit_sigma1, limit_a2,
                          limit_mu2)
                fallback_errors = np.array(
                    (fallback_error_a1, fallback_error_mu1,
                     fallback_error_sigma1, fallback_error_a2,
                     fallback_error_mu2))
        else:
            if DoBaseline:
                func = lambda x, a1, mu1, sigma1, a2, mu2, sigma2, base_a, base_b: dgauss_wbase(
                    x, a1, mu1, sigma1, a2, mu2, sigma2, base_a, base_b)
                p0 = np.array([
                    a1_init, mu1_init, sigma1_init, a2_init, mu2_init,
                    sigma2_init, base_a_init, base_b_init
                ])
                bounds = (limit_a1, limit_mu1, limit_sigma1, limit_a2,
                          limit_mu2, limit_sigma2, limit_base_a, limit_base_b)
                fallback_errors = np.array(
                    (fallback_error_a1, fallback_error_mu1,
                     fallback_error_sigma1, fallback_error_a2,
                     fallback_error_mu2, fallback_error_sigma2,
                     fallback_error_base_a, fallback_error_base_b))
            else:
                func = lambda x, a1, mu1, sigma1, a2, mu2, sigma2: dgauss_wbase(
                    x, a1, mu1, sigma1, a2, mu2, sigma2, base_a_init,
                    base_b_init)
                p0 = np.array([
                    a1_init, mu1_init, sigma1_init, a2_init, mu2_init,
                    sigma2_init
                ])
                bounds = (limit_a1, limit_mu1, limit_sigma1, limit_a2,
                          limit_mu2, limit_sigma2)
                fallback_errors = np.array(
                    (fallback_error_a1, fallback_error_mu1,
                     fallback_error_sigma1, fallback_error_a2,
                     fallback_error_mu2, fallback_error_sigma2))
    else:
        if DoBaseline:
            func = lambda x, a1, mu1, sigma1, base_a, base_b: sgauss_wbase(
                x, a1, mu1, sigma1, base_a, base_b)
            p0 = np.array(
                [a1_init, mu1_init, sigma1_init, base_a_init, base_b_init])
            bounds = (limit_a1, limit_mu1, limit_sigma1, limit_base_a,
                      limit_base_b)
            fallback_errors = np.array(
                (fallback_error_a1, fallback_error_mu1, fallback_error_sigma1,
                 fallback_error_base_a, fallback_error_base_b))
        else:
            func = lambda x, a1, mu1, sigma1: sgauss_wbase(
                x, a1, mu1, sigma1, base_a_init, base_b_init)
            p0 = np.array([a1_init, mu1_init, sigma1_init])
            bounds = (limit_a1, limit_mu1, limit_sigma1)
            fallback_errors = np.array(
                (fallback_error_a1, fallback_error_mu1, fallback_error_sigma1))

    xdata = selected_velocities
    ydata = selected_signal
    try:
        popt, pcov = op.curve_fit(func,
                                  xdata,
                                  ydata,
                                  sigma=np.ones(len(ydata)) * rmsnoise,
                                  p0=p0)
        if (np.any(np.diag(pcov) < 0.)):
            # print("invalid values in variances",np.diag(pcov))
            popt = p0.copy()
            perr = fallback_errors
        else:
            perr = np.sqrt(np.diag(pcov))
    except:
        #print("Error - curve_fit failed")
        popt = p0.copy()
        perr = fallback_errors

    optimresult = {}

    #optimresult['a']={value:popt[0],error:perr[0]}
    optimresult['values'] = {}
    optimresult['errors'] = {}
    #paramnames=['a1','mu1','sigma1','a2','mu2','sigma2','base_a','base_b']
    setofparamnames = ['a1', 'mu1',
                       'sigma1']  #,'a2','mu2','sigma2','base_a','base_b']
    for param in enumerate(setofparamnames):
        iparam = param[0]
        aparam = param[1]
        optimresult['values'][aparam] = popt[iparam]
        optimresult['errors'][aparam] = perr[iparam]

    g_amp = optimresult['values']['a1']
    g_v0 = optimresult['values']['mu1']
    g_sigma = optimresult['values']['sigma1']

    g_amp_e = optimresult['errors']['a1']
    g_v0_e = optimresult['errors']['mu1']
    g_sigma_e = optimresult['errors']['sigma1']

    if (g_sigma < 0):
        # print("g_sigma negative: ",g_sigma)
        g_sigma = sigma1_init
        g_sigma_e = fallback_error_sigma1

    gaussfit1 = gaussian(velocities, g_amp, g_v0, g_sigma)
    fit1 = gaussfit1

    if (DGauss):
        pars = popt.copy()
        err_pars = perr.copy()
        if CommonSigma:
            #pars = [optimresult['values']['a'], optimresult['values']['mu'], optimresult['values']['sigma'],optimresult['values']['a2'], optimresult['values']['mu2'], optimresult['values']['sigma']]   # pars for best fit
            #err_pars = [optimresult['errors']['a'], optimresult['errors']['mu'], optimresult['errors']['sigma'],optimresult['errors']['a2'], optimresult['errors']['mu2'], optimresult['errors']['sigma']]  #error in pars
            pars = np.append(pars, pars[2])
            err_pars = np.append(err_pars, err_pars[2])

        amps = [[pars[0], 0], [pars[3], 3]]
        amps_sorted = sorted(amps, key=itemgetter(0))
        i_G1 = amps_sorted[-1][1]
        i_G2 = amps_sorted[0][1]

        g_amp = pars[i_G1]
        g_v0 = pars[i_G1 + 1]
        g_sigma = pars[i_G1 + 2]

        g_amp_e = err_pars[i_G1]
        g_v0_e = err_pars[i_G1 + 1]
        g_sigma_e = err_pars[i_G1 + 2]
        gaussfit1 = gaussian(velocities, g_amp, g_v0, g_sigma)

        g2_amp = pars[i_G2]
        g2_v0 = pars[i_G2 + 1]
        g2_sigma = pars[i_G2 + 2]
        g2_amp_e = err_pars[i_G2]
        g2_v0_e = err_pars[i_G2 + 1]
        g2_sigma_e = err_pars[i_G2 + 2]
        gaussfit2 = gaussian(velocities, g2_amp, g2_v0, g2_sigma)

        fit1 = gaussfit1 + gaussfit2

        vpeak = velocities[np.argmax(fit1)]
        ComputeG8 = True
        if ComputeG8:
            vpeak_init = vpeak

            f_vpeak = lambda vmax: neggaussfit(vmax, g_amp, g_v0, g_sigma,
                                               g2_amp, g2_v0, g2_sigma)

            #mvpeak = Minuit(f_vpeak, vmax=vpeak_init,  #initial guess
            #                errordef=1,                      # error
            #                error_vmax=dv*0.01,
            #                #limit_vmax=(velocities.min()-1.0, velocities.max()+1.0),
            #                limit_vmax=(np.min(selected_velocities), np.max(selected_velocities)), #Bounds for pars
            #                print_level=0,
            #                )
            #mvpeak.migrad()
            #
            #vpeakMinuit=mvpeak.values['vmax']

            res = op.minimize(f_vpeak, vpeak_init)
            vpeak = res.x
            #print("Scipy optimize",vpeak," Minuit optimize",vpeakMinuit)

    if (DoBaseline):
        base_a = popt[-2]
        base_b = popt[-1]
        baseline = base_a * velocities + base_b
        fit1 += baseline

    fiterror = np.std(fit1 - signal)

    #fitinit = gaussian(velocities, Amp_init,v0_init,sigma_init)
    #plt.plot(velocities, signal)
    #plt.plot(velocities, fit1)
    #plt.plot(velocities, fitinit)
    #plt.show()

    gmom_0 = abs(simps(fit1, velocities))

    # print( "vel range:",np.min(velocities),np.max(velocities),"best fit",pars[1])
    ic = np.argmin(abs(velocities - g_v0))
    if (g_sigma < sigma_init):
        Delta_i = sigma_init / dv
    else:
        Delta_i = g_sigma / dv

    nsigrange = 5
    i0 = int(ic - nsigrange * Delta_i)
    if (i0 < 0):
        i0 = 0
    i1 = int(ic + nsigrange * Delta_i)
    if (i1 > (len(velocities) - 1)):
        i1 = (len(velocities) - 1)
    j0 = int(ic - nsigrange * Delta_i)
    if (j0 < 0):
        j0 = 0
    j1 = int(ic + nsigrange * Delta_i)
    if (j1 > (len(velocities) - 1)):
        j1 = (len(velocities) - 1)

    #print( "i0",i0,"i1",i1,"j0",j0,"j1",j1)

    sign = 1.
    if (velocities[1] < velocities[0]):
        sign = -1
    Smom_0 = sign * simps(signal[i0:i1], velocities[i0:i1])
    Smom_1 = simps(signal[i0:i1] * velocities[i0:i1], velocities[i0:i1])
    subvelo = velocities[i0:i1]
    Smom_8 = subvelo[np.argmax(signal[i0:i1])]
    Smax = np.max(signal[i0:i1])

    if (abs(Smom_0) > 0):
        Smom_1 /= Smom_0
        if (Smom_0 > 0):
            var = sign * simps(signal[i0:i1] * (velocities[i0:i1] - Smom_1)**2,
                               velocities[i0:i1])
            if (var > 0):
                Smom_2 = np.sqrt(var / Smom_0)
            else:
                Smom_2 = -1E6
        else:
            Smom_2 = -1E6
    else:
        Smom_1 = -1E6
        Smom_2 = -1E6

    sol = [
        i, j, gmom_0, g_amp, g_amp_e, g_v0, g_v0_e, g_sigma, g_sigma_e, Smom_0,
        Smom_1, Smom_2, Smom_8, fiterror, gaussfit1
    ]
    if DGauss:
        sol.extend([
            gaussfit2, g2_amp, g2_amp_e, g2_v0, g2_v0_e, g2_sigma, g2_sigma_e,
            vpeak
        ])
    if (DoBaseline):
        sol.extend([base_a, base_b, baseline])
    sol.extend([
        Smax,
    ])

    return sol
Example No. 41
#!/usr/bin/env python3
# -*- coding: utf-8 -*-#
#
# Author      : Bhishan Poudel; Physics Graduate Student, Ohio University
# Date        : Tue Mar 14, 2017

# Imports
import numpy as np
from scipy.integrate import simps

col1, col2 = np.genfromtxt("data.txt",
                           delimiter=None,
                           usecols=(0, 1),
                           dtype=None,
                           unpack=True)
mysum = simps(col1)
print(mysum)
Example No. 42
if __name__=="__main__":
	"""
	Testing idea of Normalization using the example Int_0^beta d(tau) exp(-tau) 
	"""
	beta=1.5
	Norm=0.1
	print "beta=",beta,"Norm=",Norm
	warm=5000
	Ns=int(1e6)
	Ne=int(8e6)
        Nls=arange(Ns,Ne,Ne/5)	
	res=[]
	measure=100
	tls=linspace(0,beta,1000)
	yls=array([func(tau) for tau in tls]) 
	exact=integrate.simps(yls,tls)

	for Nitt in Nls:
	  Dorder_init=0   ##we start at order 0
	  N0p,N1p=Sample(Nitt,warm,measure,Dorder_init)
	   
	  print "Nitt=",Nitt
	  print "Exact result of the integration is:"
	  print exact
	  print
	  print "Monte carlo sampling is:"
	  print Norm*N1p/N0p
	  res.append(Norm*N1p/N0p)
	  print
	  print "Direct Monte carlo sampling is:"
	  print SimpleMC(Nitt)
Example No. 43
def variation(x, V, varsol, params, show = False):
    """Calculates the parameters which minimize the energy for a variational 
    ground state function in 1D.  
    
    Input:
        x : (array) Indicates the spatial extent of the domain and imposes
            Dirichlet boundary conditions at its endpoints
        V : (array) Underlying potential which will determine the solution
        varsol : (function) Function of variational parameters which will be 
            used to constrain the solutions. 
            varsol(x, [a,b,c,...]) where [a,b,c,...] are particular choices
                from params
        params : (array) A list of tuples which represent a particular choice of
            parameters for the variational solution. An example:
            a = np.arange(0, 10, 0.1)
            b = np.arange(0, 10, 0.1)
            c = np.arange(0, 10, 0.1)
            params = [[i, j, k] for i in a for j in b for k in c]
            
    Output:
        psi : (array) The wave function which minimizes the energy on the 
            domain x for the parameters dictated in params
        E : (float) The resulting energy
        opt_params : (tuple) List of the parameter values which minimize the 
            energy
        
    Optional:
        show = False : Plots x vs psi, the variational solution and prints the 
            energy and optimal parameters
    
    Notes:
    
    """
    import numpy as np
    from matplotlib import pyplot as plt
    from scipy.integrate import simps
    
    #Check that the array x and V are compatible
    if len(x) != len(V):
        print('Error: x and V are incompatible arrays.')
        return 0            
    
    #Calculate all of the energies for the input parameter space. 
    energies = [Evar(x, varsol(x, p)/np.sqrt(simps(varsol(x, p)**2, x)), V) 
                    for p in params]
                        
    #Determine the minimum energy
    arg_opt_params = np.argmin(energies)
    
    varsol_opt = (varsol(x, params[arg_opt_params]) /
                    np.sqrt(simps(varsol(x, params[arg_opt_params])**2, x)) )
    
    if show == True:
        print('Energy minimum = '+str(energies[arg_opt_params]))
        print('Optimum parameters = '+str(params[arg_opt_params]))
        fig = plt.figure()
        plt.plot(x, varsol_opt, label = 'var sol')
        plt.legend()
        plt.show()
    
    #Warning if the minimum energy solution is on the edge of the allowed 
    #   parameter space, indicating that a local minimum was never achieved.
    #NEED    
    
    return varsol_opt, energies[arg_opt_params], params[arg_opt_params]
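# A minimal usage sketch with a hypothetical trial function and potential; the
# actual call is left commented out because the Evar helper used inside
# variation() is defined elsewhere in the original code.
import numpy as np

def gauss_trial(x, p):
    # Gaussian trial wave function of width p[0]
    return np.exp(-x**2 / (2.0 * p[0]**2))

x = np.linspace(-10.0, 10.0, 501)
V = 0.5 * x**2                                   # harmonic potential, dimensionless units
params = [[a] for a in np.linspace(0.5, 2.0, 16)]
# psi, E, opt = variation(x, V, gauss_trial, params, show=True)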
Exemplo n.º 44
0
    dat = procdata(galdata,gwdata)

    if np.isscalar(hub_arr):
        if ii==0:
            val = prob_h0(msk_gal_dat,hub_arr)
            val = val/(np.sum(val))
        else:
            val = val * prob_h0(galdata,gwdata,hub_arr)
            val = val/(np.sum(val))
    else:
        val = 0.0*hub_arr
        for pp,hh in enumerate(hub_arr):
            if ii==0:
                val[pp] = prob_h0(dat,hh)
                #val = val/(np.sum(val))
            else:
                val[pp] = val[pp] * prob_h0(dat,hh)
        val = val/(1.0*simps(val,hub_arr))        
        #val = val/(np.sum(val))
 
    print(ii)
    
mat = np.transpose([hub_arr,val])
np.savetxt('./post_hub_2.dat',mat,header='#h0,p(h0)')
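# A small sketch of the normalization step used above: dividing by the
# Simpson-rule integral makes the posterior integrate to one over hub_arr.
import numpy as np
from scipy.integrate import simps

hub_demo = np.linspace(50.0, 100.0, 201)
post_demo = np.exp(-0.5 * ((hub_demo - 70.0) / 5.0) ** 2)   # toy unnormalized posterior
post_demo = post_demo / simps(post_demo, hub_demo)
print(simps(post_demo, hub_demo))                            # ~1.0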

    




Exemplo n.º 45
0
    g4 = np.argmax(np.abs(w1f + q1f))
    norm_1f = w1f[g4] + q1f[g4]

    w1f /= norm_1f
    q1f /= norm_1f
    ux /= norm_1f
    uy /= norm_1f
    uz /= norm_1f

    dw1f = np.gradient(w1f, z_1f)
    dux = np.gradient(ux, z_1f)
    duy = np.gradient(uy, z_1f)
    duz = np.gradient(uz, z_1f)

    e1f_tot = simps(
        rho1f * (np.abs(ux)**2 + 4 * np.abs(uy)**2 + np.abs(uz)**2), z_1f)

    e1f_A1 = simps(-(dvx1f * np.real(uz * np.conj(ux))) * rho1f, z_1f)
    e1f_A2 = simps(-(4.0 * dvy1f * np.real(uz * np.conj(uy))) * rho1f, z_1f)
    e1f_A3 = simps(-(dvz1f * np.abs(uz)**2) * rho1f, z_1f)
    e1f_A = e1f_A1 + e1f_A2 + e1f_A3
    e1f_B = simps(
        -vz1f * np.real(dux * np.conj(ux) + 4.0 * duy * np.conj(uy) +
                        duz * np.conj(uz)) * rho1f, z_1f)
    e1f_C = simps(
        (kx1f * np.imag(w1f * np.conj(ux)) - np.real(dw1f * np.conj(uz))) /
        (1.0 + eps1f) * rho1f, z_1f)
    e1f_D = simps(
        -2.0 * eta_hat * np.real(q1f * np.conj(ux)) * eps1f / (1.0 + eps1f) /
        (1.0 + eps1f) * rho1f, z_1f)
    e1f_E = simps(
Exemplo n.º 46
0
sigmaPrior = 1.7 / np.sqrt(2.0)
priorLogN = np.exp(-(np.log(B) - np.log(B0))**2 / (2.0 * sigmaPrior**2)) / B

prior = priorLogN

pl.close('all')
f, ax = pl.subplots(1, 2, figsize=(12, 4), num=1)
ax = ax.flatten()

colors = brewer2mpl.get_map('Dark2', 'qualitative', nFluxes).mpl_colors

loop = 0

for i in range(len(fluxes)):
    posterior = prior * pB[2, i, :]
    normaliz = simps(posterior, B)
    ax[loop].plot(B,
                  posterior / normaliz,
                  label=labels[i],
                  linewidth=2,
                  color=colors[i])
    MMAP[2, i] = B[posterior.argmax()]
ax[loop].set_ylabel(r'p(B|$\Phi_\mathrm{obs}$)')
ax[loop].set_xlabel('B [G]')
#ax[loop].annotate(r'$\sigma_n$='+"{0:4.1f}".format(sigmas[2])+r' Mx cm$^{-2}$', xy=(0.55, 0.86), xycoords='axes fraction', fontsize=14)
ax[loop].set_xlim((0, 400))
ax[loop].legend(labelspacing=0.2, prop={'size': 12}, loc='upper right')
loop += 1

for i in range(len(fluxes)):
    posterior = prior * pB[2, i, :]
Exemplo n.º 47
0
import numpy as np
from scipy.integrate import simps

h = (0.5 - 0.1)/2
eps = 0.1
# m, second derivative

#e = 

trap = np.trapz([1.8, 2.6, 3.0, 2.8, 1.9], x=[0.1, 0.2, 0.3, 0.4, 0.5])
sim = simps(np.array([1.8, 2.6, 3.0, 2.8, 1.9]), x=[0.1, 0.2, 0.3, 0.4, 0.5])


print("Simpson's method")
print(sim)

print("trapezoidal method")
print(trap)


Exemplo n.º 48
0
    normed_mean = np.ascontiguousarray(mq[:,r] / np.linalg.norm(mq[:,r]))
    for k in range(N):
        q_c = q[:,k] / np.linalg.norm(q[:,k])
        G,T = dp(normed_mean, t, q_c, t, t, t, lam)
        gam0 = np.interp(t, T, G)
        gam[k,:] = (gam0-gam0[0])/(gam0[-1] - gam0[0])  # slight change on scale
        gam_dev[k,:] = np.gradient(gam[k,:], 1/(M-1))
        f_temp[:,k] = np.interp(compose_temp(gam[k,:]), t, f[:,k]);
        q_temp[:,k] = np.gradient(f_temp[:,k], binsize) / \
                      np.sqrt(np.abs(np.gradient(f_temp[:,k], binsize))+eps)

    qcollect[:,:,r+1] = q_temp
    fcollect[:,:,r+1] = f_temp
    
    ds[r+1] = np.sum(simps(
            (mq[:,r].reshape(-1,1) - qcollect[:,:,r+1])**2, t, axis=0)) +  \
        lam * np.sum(simps((1-np.sqrt(gam_dev.T))**2,t,axis=0))
    
    # Minimization Step
    # compute the mean of the matched function
    mq[:,r+1] = np.mean(qcollect[:,:,r+1], 1)
    
    qun[r] = np.linalg.norm(mq[:,r+1] - mq[:,r]) / np.linalg.norm(mq[:,r])
    if qun[r] < 1e-2 or r >= MaxItr:
        break


r = r+1
for k in range(N):
    q_c = q[:,k]
    mq_c = mq[:,r]
Exemplo n.º 49
0
        print ' norm0 =', norm
        print ' bdims =', mps.bdim()
        mps.mul(1.0 / norm)
        rfun, wfun = self.quadfun(case, refdet)
        from scipy.integrate import trapz, simps
        psum = 0.0
        expval = 0.0
        # Partilce number
        if case == 'n':
            b = 2 * numpy.pi
            xdata = numpy.linspace(0, b, num=npoints)
            ydata = fx(xdata, mps, rfun)
            for n in range(2 * k + 1):
                ydata2 = map(lambda x: wfun(x, n), xdata)
                ydata2 = (ydata2 * ydata).real
                y = simps(ydata2, xdata)
                psum += y
                expval += y * n
                print ' n =%3d' % n, ' p[n] =%10.5f' % y

        elif case == 'sz':
            b = 2 * numpy.pi
            xdata = numpy.linspace(0, b, num=npoints)
            ydata = fx(xdata, mps, rfun)
            for sz in numpy.arange(-0.5 * k, 0.5 * k + 0.1, 0.5):
                ydata2 = map(lambda x: wfun(x, sz), xdata)
                ydata2 = (ydata2 * ydata).real
                y = simps(ydata2, xdata)
                psum += y
                expval += y * sz
                print ' sz =%5.1f' % sz, ' p[n] =%10.5f' % y
Exemplo n.º 50
0
def calc_beam_area(beam_profile):
    r, b = beam_profile
    return integrate.simps(2 * np.pi * r * b, r)
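# A quick check of calc_beam_area with a Gaussian beam, whose area is
# 2*pi*sigma**2 analytically (assumes `integrate` is scipy.integrate, as above).
import numpy as np

sigma = 1.0
r = np.linspace(0.0, 10.0 * sigma, 2001)
b = np.exp(-r**2 / (2.0 * sigma**2))
print(calc_beam_area((r, b)), 2.0 * np.pi * sigma**2)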
Exemplo n.º 51
0
            # Plot the power spectral density and fill the delta area
            plt.figure(figsize=(7, 4))
            plt.plot(freqs, psd, lw=2, color='k')
            plt.fill_between(freqs, psd, where=idx_delta, color='skyblue')
            plt.xlabel('Frequency (Hz)')
            plt.ylabel('Power spectral density (uV^2 / Hz)')
            plt.xlim([0, 50])
            plt.ylim([0, psd.max() * 1.1])
            plt.title("Welch's periodogram")
            sns.despine()

            # Frequency resolution
            freq_res = freqs[1] - freqs[0]  # = 1 / 4 = 0.25

            # Compute the absolute power by approximating the area under the curve
            delta_power = simps(psd[idx_delta], dx=freq_res)
            print('Absolute delta power: %.3f uV^2' % delta_power)

            # Relative delta power (expressed as a percentage of total power)
            total_power = simps(psd, dx=freq_res)
            delta_rel_power = delta_power / total_power
            print('Relative delta power: %.3f' % delta_rel_power)

            # Compute BandPower values for the biosignal data
            window_sec = 5 * sfreq
            delta = [0.2, 4]
            theta = [4, 8]
            alpha = [8, 12]
            beta = [12, 30]
            gamma = [30, 100]
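            # A sketch extending the delta-band computation above to every band
            # defined in this list (assumes numpy is available as np, and reuses
            # freqs, psd and freq_res from earlier).
            bands = {'delta': delta, 'theta': theta, 'alpha': alpha,
                     'beta': beta, 'gamma': gamma}
            for name, (low, high) in bands.items():
                idx_band = np.logical_and(freqs >= low, freqs <= high)
                band_power = simps(psd[idx_band], dx=freq_res)
                print('Absolute %s power: %.3f uV^2' % (name, band_power))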
Exemplo n.º 52
0
    def analyze_fast(self, refdet):
        print '\n[mps_class.analyze_fast]'
        print ' Analyze particle-holes contributions by projection, spin-orbital is assumed.'
        print ' refdet = ', refdet
        print ' bdims  = ', self.bdim()
        #
        # The population of states with t[ph] 'quantum' number:
        #   nt = <t|t> where |t>=P|0>
        #      = <0|P2|0> = <0|P|0> = (numerical integrations)
        # Possible values for no. of quasi-particles = [0,K]
        # Since the projector itself is Hermitian, nt is real.
        #
        # U(1) group projector: Pn = 1/2pi*int_{0,2pi} exp(i*(N-n)x) dx
        # discretized version:     = 1/2pi*sum_{k} exp(-in*xk)*<MPS|MPO(xk)|MPS>
        #
        global icounter
        icounter = 0

        # Complex algebra is mandatory in order to use the separability of exp(iNx)
        def fx(xdata, mps, refdet):
            global icounter
            icounter += 1
            nsite = len(refdet)
            sites1 = mps.torank2()
            ydata = numpy.zeros(xdata.shape, dtype=numpy.complex_)
            for i, x in enumerate(xdata):
                sites2 = []
                for isite in range(nsite):
                    if refdet[isite] == 0:
                        ntmp = numpy.array([[1., 0.], [0.,
                                                       cmath.exp(1.j * x)]])
                    else:
                        ntmp = numpy.array([[cmath.exp(1.j * x), 0.], [0.,
                                                                       1.]])
                    if isite == 0:
                        tmp = numpy.einsum('ij,ja->ia', ntmp, sites1[isite])
                    elif isite == nsite - 1:
                        tmp = numpy.einsum('ij,aj->ai', ntmp, sites1[isite])
                    else:
                        tmp = numpy.einsum('ij,ajb->aib', ntmp, sites1[isite])
                    sites2.append(tmp)
                ydata[i] = mpslib.mps_dot(sites1, sites2) / (2.0 * numpy.pi)
            return ydata

        # First normalize and compress to accelerate the analysis
        norm = self.norm()
        mps = self.copy()
        #mps.icompress()
        print ' norm0  = ', norm
        print ' bdims  = ', mps.bdim()
        mps.mul(1.0 / norm)
        norm2 = mps.norm()
        k = self.nsite
        nelec = sum(refdet)
        maxNoQuasiParticles = 2 * (min(nelec, k - nelec) + 1)
        from scipy.integrate import trapz, simps
        # Trapezoidal rule
        npoints = 1000
        xdata = numpy.linspace(0, 2 * numpy.pi, num=npoints)
        ydata = fx(xdata, mps, refdet)
        psum = 0.0
        for n in range(maxNoQuasiParticles):
            if n % 2 == 1: continue
            ydata2 = map(lambda x: cmath.exp(-1.j * n * x), xdata)
            ydata2 = (ydata2 * ydata).real
            y = simps(ydata2, xdata)
            print ' n = %3d' % n, ' erank=', n / 2, ' y=', y, ' icounter=', icounter
            psum += y
        print ' Total population =', psum
        return 0
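# A numerical sketch of the projector identity used in the comments above,
# (1/2pi) * Int_0^{2pi} exp(i*(N - n)*x) dx = delta_{N,n}, for a hypothetical
# state of definite particle number N0:
import numpy as np
from scipy.integrate import simps

N0 = 3                                     # hypothetical particle number
xdata = np.linspace(0.0, 2.0 * np.pi, 1000)
for n in range(6):
    ydata = np.exp(1j * (N0 - n) * xdata) / (2.0 * np.pi)
    print(n, simps(ydata.real, xdata))     # ~1 for n == N0, ~0 otherwise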
Exemplo n.º 53
0
    '''
    integ = simps(spectr / k, x=k)
    L = 2 * np.pi * integ / E_tot

    return L
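# A quick check of long_int with a flat spectrum E(k) = 1 on [1, 10], for which
# L = 2*pi*ln(10)/9 analytically (assumes numpy as np and scipy's simps, as above).
k_test = np.linspace(1.0, 10.0, 1000)
E_test = np.ones_like(k_test)
print(long_int(k_test, E_test, simps(E_test, x=k_test)),
      2.0 * np.pi * np.log(10.0) / 9.0)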


L_iso = np.zeros(len(tiempo))
L_para = np.zeros(len(tiempo))
L_perp = np.zeros(len(tiempo))

for i in range(2, len(tiempo) + 1):
    E_iso = np.loadtxt(path + file_iso.format(i),
                       delimiter='  ',
                       usecols=(1, ))
    E_tot = simps(E_iso, x=k_iso)

    E_para = np.loadtxt(path + file_para.format(i),
                        delimiter='  ',
                        usecols=(1, ))
    E_perp = np.loadtxt(path + file_perp.format(i),
                        delimiter='  ',
                        usecols=(1, ))

    L_iso[i - 1] = long_int(k_iso, E_iso, E_tot)
    L_para[i - 1] = long_int(k_para[1:], E_para[1:], E_tot)
    L_perp[i - 1] = long_int(k_perp[1:], E_perp[1:], E_tot)

#%% Plot this

plt.figure()
Exemplo n.º 54
0
    def __init__(self, iplot=False, quiet=False):
        """
        :param iplot: display the result when set to True (default False)
        :type iplot: bool
        :param quiet: less verbose when set to True (default is False)
        :type quiet: bool
        """
        if os.path.exists('tInitAvg'):
            file = open('tInitAvg', 'r')
            tstart = float(file.readline())
            file.close()
            logFiles = scanDir('log.*')
            tags = []
            for lg in logFiles:
                nml = MagicSetup(quiet=True, nml=lg)
                if nml.start_time > tstart:
                    if os.path.exists('bLayersR.{}'.format(nml.tag)):
                        tags.append(nml.tag)
            if len(tags) > 0:
                print(tags)
            else:
                tags = None
            MagicSetup.__init__(self, quiet=True, nml=logFiles[-1])

            a = AvgField()
            self.nuss = a.nuss
            self.reynolds = a.reynolds
            e2fluct = a.ekin_pol_avg + a.ekin_tor_avg - a.ekin_pola_avg - a.ekin_tora_avg
        else:
            logFiles = scanDir('log.*')
            MagicSetup.__init__(self, quiet=True, nml=logFiles[-1])
            tags = None
            self.nuss = 1.
            self.reynolds = 1.
            e2fluct = 1.
        par = MagicRadial(field='bLayersR', iplot=False, tags=tags)
        self.varS = abs(par.entropy_SD)
        self.ss = par.entropy

        if os.path.exists('tInitAvg'):
            logFiles = scanDir('log.*', tfix=1409827718.0)
            # Workaround for code mistake before this time
            tfix = 1409827718.0
            tagsFix = []
            for lg in logFiles:
                nml = MagicSetup(quiet=True, nml=lg)
                if nml.start_time > tstart:
                    if os.path.exists('bLayersR.{}'.format(nml.tag)):
                        tagsFix.append(nml.tag)
            if len(tagsFix) > 0:
                print('Fix temp. tags', tagsFix)
                parFix = MagicRadial(field='bLayersR',
                                     iplot=False,
                                     tags=tagsFix)
                self.varS = abs(parFix.entropy_SD)
                self.ss = parFix.entropy

            self.tags = tagsFix
        self.uh = par.uh
        self.duh = par.duhdr
        self.rad = par.radius
        self.ro = self.rad[0]
        self.ri = self.rad[-1]

        vol_oc = 4. / 3. * np.pi * (self.ro**3 - self.ri**3)
        self.rey_fluct = np.sqrt(2. * e2fluct / vol_oc)

        self.reh = 4. * np.pi * intcheb(self.rad**2 * self.uh,
                                        len(self.rad) - 1, self.ri,
                                        self.ro) / (4. / 3. * np.pi *
                                                    (self.ro**3 - self.ri**3))

        # Thermal dissipation boundary layer
        if hasattr(par, 'dissS'):
            self.dissS = par.dissS
            self.epsT = -4. * np.pi * intcheb(self.rad**2 * self.dissS,
                                              len(self.rad) - 1, self.ro,
                                              self.ri)
            self.epsTR = 4. * np.pi * self.rad**2 * self.dissS
            ind = getMaxima(-abs(self.epsTR - self.epsT))

            try:
                self.dissTopS = self.ro - self.rad[ind[0]]
                self.dissBotS = self.rad[ind[-1]] - self.ri
                self.dissEpsTbl, self.dissEpsTbulk = integBulkBc(
                    self.rad, self.epsTR, self.ri, self.ro, self.dissBotS,
                    self.dissTopS)
            except IndexError:
                self.dissTopS = self.ro
                self.dissBotS = self.ri
                self.dissEpsTbl, self.dissEpsTbulk = 0., 0.

            print('thDiss bl, bulk', self.dissEpsTbl / self.epsT,
                  self.dissEpsTbulk / self.epsT)
        # First way of defining the thermal boundary layers: with var(S)
        #rThLayer = getMaxima(self.rad, self.varS)
        ind = argrelextrema(self.varS, np.greater)[0]
        if len(ind) != 0:
            self.bcTopVarS = self.ro - self.rad[ind[0]]
            self.bcBotVarS = self.rad[ind[-1]] - self.ri
        else:
            self.bcTopVarS = 1.
            self.bcBotVarS = 1.
        if hasattr(self, 'epsT'):
            self.varSEpsTbl, self.varSEpsTbulk = integBulkBc(
                self.rad, self.epsTR, self.ri, self.ro, self.bcBotVarS,
                self.bcTopVarS)
            print('var(S) bl, bulk', self.varSEpsTbl / self.epsT,
                  self.varSEpsTbulk / self.epsT)

        # Second way of defining the thermal boundary layers: intersection of the slopes
        d1 = matder(len(self.rad) - 1, self.ro, self.ri)
        self.ttm = 3.*intcheb(self.ss*self.rad**2, len(self.rad)-1, self.ri, self.ro) \
                   /(self.ro**3-self.ri**3)
        dsdr = np.dot(d1, self.ss)
        self.beta = dsdr[len(dsdr) // 2]
        print('beta={:.2f}'.format(self.beta))
        self.slopeTop = dsdr[2] * (self.rad - self.ro) + self.ss[0]
        self.slopeBot = dsdr[-1] * (self.rad - self.ri) + self.ss[-1]

        self.dtdrm = dsdr[len(self.ss) // 2]
        tmid = self.ss[len(self.ss) // 2]
        self.slopeMid = self.dtdrm * (self.rad -
                                      self.rad[len(self.rad) // 2]) + tmid

        self.bcTopSlope = (tmid - self.ss[0]) / (self.dtdrm - dsdr[2])
        self.bcBotSlope = -(tmid - self.ss[-1]) / (self.dtdrm - dsdr[-1])

        # 2nd round with a more accurate slope
        bSlope = dsdr[self.rad <= self.ri + self.bcBotSlope / 4.].mean()
        tSlope = dsdr[self.rad >= self.ro - self.bcTopSlope / 4.].mean()
        self.slopeBot = bSlope * (self.rad - self.ri) + self.ss[-1]
        self.slopeTop = tSlope * (self.rad - self.ro) + self.ss[0]
        #self.bcTopSlope = -(self.ttm-self.ss[0])/tSlope
        self.bcTopSlope = -(tmid - self.dtdrm * self.rad[len(self.rad) // 2] -
                            self.ss[0] + tSlope * self.ro) / (self.dtdrm -
                                                              tSlope)
        self.bcBotSlope = -(tmid - self.dtdrm * self.rad[len(self.rad) // 2] -
                            self.ss[-1] + bSlope * self.ri) / (self.dtdrm -
                                                               bSlope)
        self.dto = tSlope * (self.bcTopSlope - self.ro) + self.ss[0]
        self.dti = bSlope * (self.bcBotSlope - self.ri) + self.ss[-1]
        self.dto = self.dto - self.ss[0]
        self.dti = self.ss[-1] - self.dti

        self.bcTopSlope = self.ro - self.bcTopSlope
        self.bcBotSlope = self.bcBotSlope - self.ri

        if hasattr(self, 'epsT'):
            self.slopeEpsTbl, self.slopeEpsTbulk = integBulkBc(
                self.rad, self.epsTR, self.ri, self.ro, self.bcBotSlope,
                self.bcTopSlope)

            print('slopes bl, bulk', self.slopeEpsTbl / self.epsT,
                  self.slopeEpsTbulk / self.epsT)

        pow = MagicRadial(field='powerR', iplot=False, tags=tags)
        self.vi = pow.viscDiss
        self.buo = pow.buoPower

        self.epsV = -intcheb(self.vi, len(self.rad) - 1, self.ro, self.ri)
        ind = getMaxima(-abs(self.vi - self.epsV))
        if len(ind) > 2:
            for i in ind:
                if self.vi[i - 1] - self.epsV > 0 and self.vi[
                        i + 1] - self.epsV < 0:
                    self.dissTopV = self.ro - self.rad[i]
                elif self.vi[i - 1] - self.epsV < 0 and self.vi[
                        i + 1] - self.epsV > 0:
                    self.dissBotV = self.rad[i] - self.ri
        else:
            self.dissTopV = self.ro - self.rad[ind[0]]
            self.dissBotV = self.rad[ind[-1]] - self.ri
        try:
            self.dissEpsVbl, self.dissEpsVbulk = integBulkBc(
                self.rad, self.vi, self.ri, self.ro, self.dissBotV,
                self.dissTopV)
        except AttributeError:
            self.dissTopV = 0.
            self.dissBotV = 0.
            self.dissEpsVbl = 0.
            self.dissEpsVbulk = 0.

        print('visc Diss bl, bulk', self.dissEpsVbl / self.epsV,
              self.dissEpsVbulk / self.epsV)

        # First way of defining the viscous boundary layers: with duhdr
        #rViscousLayer = getMaxima(self.rad, self.duh)
        if self.kbotv == 1 and self.ktopv == 1:
            ind = argrelextrema(self.duh, np.greater)[0]
            if len(ind) == 0:
                self.bcTopduh = 1.
                self.bcBotduh = 1.
            else:
                if ind[0] < 4:
                    self.bcTopduh = self.ro - self.rad[ind[1]]
                else:
                    self.bcTopduh = self.ro - self.rad[ind[0]]
                if len(self.rad) - ind[-1] < 4:
                    self.bcBotduh = self.rad[ind[-2]] - self.ri
                else:
                    self.bcBotduh = self.rad[ind[-1]] - self.ri
            self.slopeTopU = 0.
            self.slopeBotU = 0.
            self.uhTopSlope = 0.
            self.uhBotSlope = 0.
            self.slopeEpsUbl = 0.
            self.slopeEpsUbulk = 0.
            self.uhBot = 0.
            self.uhTop = 0.
        else:
            ind = argrelextrema(self.uh, np.greater)[0]
            if len(ind) == 1:
                ind = argrelextrema(self.uh, np.greater_equal)[0]
            if len(ind) == 0:
                self.bcTopduh = 1.
                self.bcBotduh = 1.
            else:
                if ind[0] < 4:
                    self.bcTopduh = self.ro - self.rad[ind[1]]
                else:
                    self.bcTopduh = self.ro - self.rad[ind[0]]
                if len(self.rad) - ind[-1] < 4:
                    self.bcBotduh = self.rad[ind[-2]] - self.ri
                else:
                    self.bcBotduh = self.rad[ind[-1]] - self.ri

            self.uhTop = self.uh[self.rad == self.ro - self.bcTopduh][0]
            self.uhBot = self.uh[self.rad == self.ri + self.bcBotduh][0]

            self.bcBotduh, self.bcTopduh, self.uhBot, self.uhTop =      \
                        getAccuratePeaks(self.rad, self.uh, self.uhTop, \
                                         self.uhBot, self.ri, self.ro)

            duhdr = np.dot(d1, self.uh)

            #1st round
            mask = (self.rad >= self.ro - self.bcTopduh / 4) * (self.rad <
                                                                self.ro)
            slopeT = duhdr[mask].mean()
            mask = (self.rad <= self.ri + self.bcBotduh / 4) * (self.rad >
                                                                self.ri)
            slopeB = duhdr[mask].mean()
            self.slopeTopU = slopeT * (self.rad - self.ro) + self.uh[0]
            self.slopeBotU = slopeB * (self.rad - self.ri) + self.uh[-1]
            self.uhTopSlope = -self.uhTop / slopeT
            self.uhBotSlope = self.uhBot / slopeB

            #2nd round
            mask = (self.rad >= self.ro - self.uhTopSlope / 4.) * (self.rad <
                                                                   self.ro)
            slopeT = duhdr[mask].mean()
            mask = (self.rad <= self.ri + self.uhBotSlope / 4) * (self.rad >
                                                                  self.ri)
            slopeB = duhdr[mask].mean()
            self.uhTopSlope = -self.uhTop / slopeT
            self.uhBotSlope = self.uhBot / slopeB

            self.slopeEpsUbl, self.slopeEpsUbulk = integBulkBc(
                self.rad, self.vi, self.ri, self.ro, self.uhBotSlope,
                self.uhTopSlope)

        self.uhEpsVbl, self.uhEpsVbulk = integBulkBc(self.rad, self.vi,
                                                     self.ri, self.ro,
                                                     self.bcBotduh,
                                                     self.bcTopduh)
        print('uh bl, bulk', self.uhEpsVbl / self.epsV,
              self.uhEpsVbulk / self.epsV)

        # Convective Rol in the thermal boundary Layer
        par = MagicRadial(field='parR', iplot=False, tags=tags)
        kin = MagicRadial(field='eKinR', iplot=False, tags=tags)
        ekinNas = kin.ekin_pol + kin.ekin_tor - kin.ekin_pol_axi - kin.ekin_tor_axi
        ReR = np.sqrt(2. * abs(ekinNas) / par.radius**2 / (4. * np.pi))
        RolC = ReR * par.ek / par.dlVc

        self.dl = par.dlVc
        y = RolC[par.radius >= self.ro - self.bcTopSlope]
        x = par.radius[par.radius >= self.ro - self.bcTopSlope]
        try:
            self.rolTop = simps(3. * y * x**2,
                                x) / (self.ro**3 -
                                      (self.ro - self.bcTopSlope)**3)
        except IndexError:
            self.rolTop = 0.

        self.rolbl, self.rolbulk = integBulkBc(self.rad,
                                               4. * np.pi * RolC * self.rad**2,
                                               self.ri,
                                               self.ro,
                                               self.bcBotSlope,
                                               self.bcTopSlope,
                                               normed=True)

        self.rebl, self.rebulk = integBulkBc(self.rad,
                                             4. * np.pi * ReR * self.rad**2,
                                             self.ri,
                                             self.ro,
                                             self.bcBotSlope,
                                             self.bcTopSlope,
                                             normed=True)

        self.lengthbl, self.lengthbulk = integBulkBc(self.rad,
                                                     self.dl * 4. * np.pi *
                                                     self.rad**2,
                                                     self.ri,
                                                     self.ro,
                                                     self.bcBotSlope,
                                                     self.bcTopSlope,
                                                     normed=True)

        self.rehbl, self.rehbulk = integBulkBc(self.rad,
                                               self.uh * 4. * np.pi *
                                               self.rad**2,
                                               self.ri,
                                               self.ro,
                                               self.bcBotduh,
                                               self.bcTopduh,
                                               normed=True)

        y = RolC[par.radius <= self.ri + self.bcBotSlope]
        x = par.radius[par.radius <= self.ri + self.bcBotSlope]
        self.rolBot = simps(3. * y * x**2, x) / (
            (self.ri + self.bcBotSlope)**3 - self.ri**3)
        print('reynols bc, reynolds bulk', self.rebl, self.rebulk)
        print('reh bc, reh bulk', self.rehbl, self.rehbulk)
        print('rolbc, rolbulk, roltop, rolbot', self.rolbl, self.rolbulk,
              self.rolBot, self.rolTop)

        par.dlVc[0] = 0.
        par.dlVc[-1] = 0.
        self.lBot, self.lTop = integBotTop(self.rad,
                                           4. * np.pi * self.rad**2 * par.dlVc,
                                           self.ri,
                                           self.ro,
                                           self.bcBotSlope,
                                           self.bcTopSlope,
                                           normed=True)

        uhbm, utbm = integBotTop(self.rad,
                                 4. * np.pi * self.uh,
                                 self.ri,
                                 self.ro,
                                 self.bcBotSlope,
                                 self.bcTopSlope,
                                 normed=True)

        # Convective Rol in the thermal boundary Layer
        if len(scanDir('perpParR.*')) != 0:
            tags = []
            for lg in logFiles:
                nml = MagicSetup(quiet=True, nml=lg)
                if nml.start_time > tstart:
                    if os.path.exists('perpParR.{}'.format(nml.tag)):
                        tags.append(nml.tag)
            perpPar = MagicRadial(field='perpParR', iplot=False, tags=tags)
            eperpNas = perpPar.Eperp - perpPar.Eperp_axi
            eparNas = perpPar.Epar - perpPar.Epar_axi
            RePerpNas = np.sqrt(2. * abs(eperpNas))
            ReParNas = np.sqrt(2. * abs(eparNas))
            RePerp = np.sqrt(2. * abs(perpPar.Eperp))
            RePar = np.sqrt(2. * abs(perpPar.Epar))

            self.reperpbl, self.reperpbulk = integBulkBc(self.rad,
                                                         4. * np.pi * RePerp *
                                                         self.rad**2,
                                                         self.ri,
                                                         self.ro,
                                                         self.bcBotSlope,
                                                         self.bcTopSlope,
                                                         normed=True)
            self.reparbl, self.reparbulk = integBulkBc(self.rad,
                                                       4. * np.pi * RePar *
                                                       self.rad**2,
                                                       self.ri,
                                                       self.ro,
                                                       self.bcBotSlope,
                                                       self.bcTopSlope,
                                                       normed=True)
            self.reperpnasbl, self.reperpnasbulk = integBulkBc(
                self.rad,
                4. * np.pi * RePerpNas * self.rad**2,
                self.ri,
                self.ro,
                self.bcBotSlope,
                self.bcTopSlope,
                normed=True)
            self.reparnasbl, self.reparnasbulk = integBulkBc(
                self.rad,
                4. * np.pi * ReParNas * self.rad**2,
                self.ri,
                self.ro,
                self.bcBotSlope,
                self.bcTopSlope,
                normed=True)
        else:
            self.reperpbl = 0.
            self.reperpbulk = 0.
            self.reparbl = 0.
            self.reparbulk = 0.
            self.reperpnasbl = 0.
            self.reperpnasbulk = 0.
            self.reparnasbl = 0.
            self.reparnasbulk = 0.

        if iplot:
            self.plot()

        if not quiet:
            print(self)
Exemplo n.º 55
0
import pandas as pd
from scipy import integrate

momentxz = pd.read_csv("momentxz.csv")
momentxy = pd.read_csv("momentxy.csv")

y_1 = []
x_1 = []
for i in range(0, len(momentxz.index)):
    if i < len(momentxz.index) - 1:
        a = momentxz.values[i, 1]
        b = momentxz.values[i + 1, 1]
        c = momentxz.values[i, 0]
        d = momentxz.values[i + 1, 0]

        fy = [a, b]
        fx = [c, d]
        dx = (c + d) / 2
        first_int = integrate.simps(fy)
        y_1.append(first_int)
        x_1.append(dx)

y_deflections = []
x_coordinates = []

#1349
y_1[1349] = 0

for i in range(0, len(y_1)):
    if i < len(y_1) - 1:
        a = y_1[i]
        b = y_1[i + 1]
        c = x_1[i]
        d = x_1[i + 1]
Exemplo n.º 56
0
    def build(self,
              age,
              sfh,
              dust,
              metal,
              fesc=1.,
              sfh_law='exp',
              dustmodel='calzetti',
              neb_cont=True,
              neb_met=True):
        """
        
        
        """
        self.tg = age * 1.e9
        if sfh_law == 'exp':
            self.tau = sfh * 1.e9
        elif sfh_law == 'del':
            self.tau = sfh * 1.e9
        else:
            self.tau = sfh
        self.tauv = dust
        self.mi = int(abs(metal))
        self.fesc = fesc
        self.sfh_law = sfh_law
        self.inc_cont = neb_cont
        self.inc_met = neb_met
        self.dust_model = dustmodel

        mu = 0.3
        epsilon = 0.

        self.ta = self.ta_arr[self.mi]
        self.wave = self.wave_arr[self.mi]

        [T1, T2] = numpy.meshgrid(self.tg, self.ta)
        tgi = numpy.argmin(numpy.abs(self.tg - self.ta))
        self.tg = self.ta[tgi]

        if len(self.neb_wave) != len(self.wave):
            self.neb_cont = griddata(self.neb_wave, self.neb_cont, self.wave)
            self.neb_hlines = griddata(self.neb_wave, self.neb_hlines,
                                       self.wave)
            neb_metaln = numpy.zeros((len(self.wave), 3))
            for i in range(3):
                neb_metaln[:, i] = griddata(self.neb_wave,
                                            self.neb_metal[:, i], self.wave)
            self.neb_metal = neb_metaln
            self.neb_wave = self.wave
        #quietprint("Metallicity "+str(self.mi+1)+":")

    #print ".ised file: "+files[abs(SSP)]
        sed = self.sed_arr[self.mi]
        strm = self.strm_arr[self.mi]
        rmtm = self.rmtm_arr[self.mi]
        self.iw = self.iw_arr[self.mi]
        metal = str((self.metal_arr[self.mi]))[12:-3].strip()
        #quietprint(metal[self.mi] + "\nInclude nebular emission: " + str(add_nebular))
        SSP_Z = float(re.split("Z=?", metal)[1])
        #print SSP_Z,
        if SSP_Z <= 0.0004: neb_z = 0
        elif SSP_Z > 0.0004 and SSP_Z <= 0.004: neb_z = 1
        elif SSP_Z > 0.004: neb_z = 2
        #print neb_z

        if self.dust_model == "charlot":
            ATT = numpy.empty([len(self.wave), len(self.ta)])
            tv = ((self.tauv / 1.0857) * numpy.ones(len(self.ta)))
            tv[self.ta > 1e7] = mu * self.tauv
            lam = numpy.array((5500 / self.wave)**0.7)
            ATT[:, :] = (numpy.exp(-1 * numpy.outer(lam, tv)))

        elif self.dust_model == "calzetti":
            ATT = numpy.ones([len(self.wave), len(self.ta)])
            k = numpy.zeros_like(self.wave)

            w0 = [self.wave <= 1200]
            w1 = [self.wave < 6300]
            w2 = [self.wave >= 6300]
            w_u = self.wave / 1e4

            x1 = numpy.argmin(numpy.abs(self.wave - 1200))
            x2 = numpy.argmin(numpy.abs(self.wave - 1250))

            k[w2] = 2.659 * (-1.857 + 1.040 / w_u[w2])
            k[w1] = 2.659 * (-2.156 + (1.509 / w_u[w1]) -
                             (0.198 / w_u[w1]**2) + (0.011 / w_u[w1]**3))
            k[w0] = k[x1] + ((self.wave[w0] - 1200.) * (k[x1] - k[x2]) /
                             (self.wave[x1] - self.wave[x2]))

            k += 4.05
            k[k < 0.] = 0.

            tv = self.tauv * k / 4.05
            for ti in range(0, len(self.ta)):
                ATT[:, ti] *= numpy.power(10, -0.4 * tv)

        elif self.dust_model == "calzetti2":
            ATT = numpy.ones([len(self.wave), len(self.ta)])
            k = numpy.zeros_like(self.wave)

            w0 = [self.wave <= 1000]
            w1 = [(self.wave > 1000) * (self.wave < 6300)]
            w2 = [self.wave >= 6300]
            w_u = self.wave / 1e4

            k[w2] = 2.659 * (-1.857 + 1.040 / w_u[w2])
            k[w1] = 2.659 * (-2.156 + (1.509 / w_u[w1]) -
                             (0.198 / w_u[w1]**2) + (0.011 / w_u[w1]**3))

            p1 = self.dust_func(self.wave, 27, 4, 5.5, 0.08) + self.dust_func(
                self.wave, 185, 90, 2, 0.042)

            k[w0] = p1[w0] / (p1[w1][0] / k[w1][0])
            k += 4.05
            k[k < 0.] = 0.
            tv = self.tauv * k / 4.05
            for ti in range(0, len(self.ta)):
                ATT[:, ti] *= numpy.power(10, -0.4 * tv)

        elif self.dust_model == "smc":
            ai = [185., 27., 0.005, 0.01, 0.012, 0.03]
            bi = [90., 5.5, -1.95, -1.95, -1.8, 0.]
            ni = [2., 4., 2., 2., 2., 2.]
            li = [0.042, 0.08, 0.22, 9.7, 18., 25.]

            eta = numpy.zeros_like(self.wave)
            for i in xrange(len(ai)):
                eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])

            Rv = 2.93
            Ab = self.tauv * (1 + (1 / Rv))

            print numpy.exp(self.tauv * eta)
            ATT = numpy.ones([len(self.wave), len(self.ta)])
            for ti in range(0, len(self.ta)):
                ATT[:, ti] *= numpy.power(10, -0.4 * (Ab * eta))
                #Offset added to renormalise from B to V band
                #ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)

        elif self.dust_model == "lmc":
            ai = [175., 19., 0.023, 0.005, 0.006, 0.02]
            bi = [90., 4.0, -1.95, -1.95, -1.8, 0.]
            ni = [2., 4.5, 2., 2., 2., 2.]
            li = [0.046, 0.08, 0.22, 9.7, 18., 25.]

            eta = numpy.zeros_like(self.wave)
            for i in xrange(len(ai)):
                eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])

            Rv = 3.16
            Ab = self.tauv * (1 + (1 / Rv))

            ATT = numpy.ones([len(self.wave), len(self.ta)])
            for ti in range(0, len(self.ta)):
                ATT[:, ti] *= numpy.power(10, -0.4 * (Ab * eta))
                #Offset added to renormalise from B to V band
                #ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)

        elif self.dust_model == "mw":
            ai = [165., 14., 0.045, 0.002, 0.002, 0.012]
            bi = [90., 4., -1.95, -1.95, -1.8, 0.]
            ni = [2., 6.5, 2., 2., 2., 2.]
            li = [0.047, 0.08, 0.22, 9.7, 18., 25.]

            eta = numpy.zeros_like(self.wave)
            for i in xrange(len(ai)):
                eta += self.dust_func(self.wave, ai[i], bi[i], ni[i], li[i])

            Rv = 3.08
            Ab = self.tauv * (1 + (1 / Rv))

            ATT = numpy.ones([len(self.wave), len(self.ta)])
            for ti in range(0, len(self.ta)):
                ATT[:, ti] *= numpy.power(10, -0.4 * (Ab * eta))
                #Offset added to renormalise from B to V band
                #ATT[:,ti] *= numpy.exp(-1*self.tauv*eta)
        """
        SECTION 1
        First calculate and store those parameters that are functions of the age array 
        'ta' only - these are the same for every model to be made. The parameters are 
        the age array TP, the time interval array DT, the interpolation coefficient 
        'a' and the interpolation indices J. Each is stored in cell arrays of size ks,
        with the data corresponding to the original age array first, and the 
        interpolated data second.
        """
        self.TP = {}
        self.A = {}
        self.J = {}
        self.DT = {}

        for ai in range(tgi + 1):
            #Calculate taux2: the reverse age array; remove those values which
            #are less than the first non-zero entry of taux1 - these values
            #are treated differently in the original BC code
            taux1 = self.ta[:ai + 1]
            taux2 = self.ta[ai] - self.ta[ai::-1]
            if max(taux1) > 0.:
                taux2 = numpy.delete(
                    taux2,
                    numpy.where(taux2 < taux1[numpy.flatnonzero(taux1)[0]]))
            #Remove values common to taux1 and taux2; calculate array TP

            [T1, T2] = numpy.meshgrid(taux1, taux2)
            [i, j] = numpy.where(T1 - T2 == 0)
            taux2 = numpy.delete(taux2, i)
            self.TP[ai] = self.ta[ai] - numpy.concatenate(
                (taux1, taux2), axis=0)
            l = len(taux2)

            #If taux2 has entries, calculate the interpolation parameters a and J.
            #The indices correspond to those values of 'ta' which are just below
            #the entries in taux2. They are calculated by taking the difference
            #between the two arrays, then finding the last negative entry in the
            #resulting array.

            if l == 0:
                self.J[ai] = numpy.array([])
                self.A[ai] = numpy.array([])
            if l > 0:
                [T1, T2] = numpy.meshgrid(self.ta, taux2)
                T = T1 - T2
                T[numpy.where(T <= 0)] = 0
                T[numpy.where(T != 0)] = 1
                T = numpy.diff(T, 1, 1)
                (i, self.J[ai]) = T.nonzero()

                self.A[ai] = (
                    numpy.log10(taux2 / self.ta[self.J[ai]]) /
                    numpy.log10(self.ta[self.J[ai] + 1] / self.ta[self.J[ai]]))

            #Calculate age difference array: the taux arrays are joined and
            #sorted, the differences calculated, then rearranged back to the order
            #of the original taux values.
            taux = numpy.concatenate((taux1, taux2), axis=0)
            taux.sort()

            b = numpy.searchsorted(taux, taux1)
            c = numpy.searchsorted(taux, taux2)
            order = numpy.concatenate((b, c))

            d = numpy.diff(taux)
            dt = numpy.append(d, 0) + numpy.append(0, d)
            self.DT[ai] = numpy.copy(dt[order])

        SED = numpy.empty([len(self.wave)])
        Nlyman = numpy.empty([1])
        Nlyman_final = numpy.empty([1])
        beta = numpy.empty([1])
        norm = numpy.empty([1])
        STR = numpy.empty([tgi + 1])
        SFR = numpy.empty([tgi + 1])
        W = {}
        # metal=[str((self.data[1]))[12:-3].strip()]*len(params.metallicities)

        RMr = numpy.empty([tgi + 1])
        PRr = numpy.empty([tgi + 1])
        URr = numpy.empty([tgi + 1])
        Tr = numpy.empty([tgi + 1])
        """
        SECTION 2
        Now calculate the integration coefficients w, and store them in the
        cell array W. Also calculate the stellar mass fraction str. The so
        array is expanded and used by each successive iteration of the inner
        loop (ai). The outer loop repeats the operation for each tau value.

        """

        prgas = numpy.zeros(tgi + 1)

        for ai in xrange(tgi + 1):
            j = self.J[ai]  #Interpolation indices
            tp = self.TP[ai]  #Integration timescale

            pgas = numpy.zeros_like(tp)
            if ai == 0:
                prgas = numpy.zeros_like(self.ta)
            else:
                i = numpy.where(tp <= self.ta[ai - 1])
                ii = numpy.where(tp > self.ta[ai - 1])
                pgas[i] = griddata(self.ta, prgas, tp[i])
                pgas[ii] = prgas[ai - 1]
            #print prgas[ai]

            tbins = numpy.logspace(0, numpy.log10(max(tp)), 1000)
            npgas = numpy.zeros_like(tbins)

            if self.sfh_law == 'exp':
                if self.tau > 0.:
                    sr = (1 + epsilon * pgas) * numpy.exp(
                        -1 * tp / self.tau) / abs(self.tau)
                    norma = 1
                    if len(sr) > 1:
                        i = numpy.where(tbins <= self.ta[ai - 1])
                        ii = numpy.where(tbins > self.ta[ai - 1])
                        npgas[i] = griddata(self.ta, prgas, tbins[i])
                        npgas[ii] = prgas[ai - 1]
                        norma = simps(
                            (1 + epsilon * npgas) *
                            numpy.exp(-1 * tbins / self.tau) / abs(self.tau),
                            tbins)
                        sr /= norma

                elif self.tau < 0.:
                    sr = numpy.exp(-1 * tp / self.tau) / abs(self.tau)
                    norma = 1
                    self.sr = sr
                    if len(sr) > 1:
                        norma = simps(
                            numpy.exp(-1 * tbins / self.tau) / abs(self.tau),
                            tbins)
                        sr /= norma
                    #print sr[0]
                    self.norma = norma

                w = sr * self.DT[ai] / 2
                w1 = numpy.array(w[:ai + 1])
                W[0, ai] = w1

                strr = numpy.array(numpy.dot(w1, strm[:ai + 1]))
                rm = numpy.array(numpy.dot(w1, rmtm[:ai + 1]))

                l = len(self.A[ai])
                if l > 0:

                    w2 = w[ai + 1:ai + l + 1]
                    wa = w2 * self.A[ai]
                    wb = w2 - wa

                    W[1, ai] = wa
                    W[2, ai] = wb
                    strr += (numpy.dot(wb, strm[j]) +
                             numpy.dot(wa, strm[j + 1]))
                    rm += (numpy.dot(wb, rmtm[j]) + numpy.dot(wa, rmtm[j + 1]))

                if strr > 1: strr = 1

                if self.tau > 0.:
                    ugas = numpy.exp(-1 * self.ta[ai] / self.tau)
                elif self.tau < 0.:
                    ugas = numpy.exp(-1 * self.ta[ai] / self.tau) / numpy.exp(
                        -1 * max(self.ta) / self.tau)
                    #ugas = 1.
                #Processed gas = gas formed into stars - mass in stars - remnants
                prgas[ai] = 1 - ugas - strr - rm
                if prgas[ai] < 0.: prgas[ai] = 0

                #print prgas[ai]
                URr[ai] = ugas
                PRr[ai] = prgas[ai]
                RMr[ai] = rm
                Tr[ai] = simps(
                    numpy.exp(-1 * numpy.sort(tp) / self.tau) / self.tau,
                    numpy.sort(tp))

                STR[ai] = strr
                if self.tau > 0:
                    SFR[ai] = (1 + epsilon * prgas[ai]) * numpy.exp(
                        -self.ta[ai] / self.tau) / abs(self.tau) / norma
                elif self.tau < 0:
                    SFR[ai] = numpy.exp(-self.ta[ai] / self.tau) / abs(
                        self.tau) / norma
                #print SFR[ai,ti,mi]
                #self.SN = float(snn)
                SFR[ai] /= STR[ai]
            else:
                if self.sfh_law == 'pow':
                    sfr = self._sfh_pow
                elif self.sfh_law == 'del':
                    sfr = self._sfh_del
                elif self.sfh_law == 'tru':
                    sfr = self._sfh_tru

                sr = sfr(tp, self.tau)
                self.tp = tp
                norma = 1
                self.sr = sr
                if len(sr) > 1:
                    norma = simps(sfr(tbins, self.tau), tbins)
                    sr /= norma
                self.norma = norma
                #print sr[0]

                w = sr * self.DT[ai] / 2
                w1 = numpy.array(w[:ai + 1])
                W[0, ai] = w1

                strr = numpy.array(numpy.dot(w1, strm[:ai + 1]))
                rm = numpy.array(numpy.dot(w1, rmtm[:ai + 1]))

                l = len(self.A[ai])
                if l > 0:

                    w2 = w[ai + 1:ai + l + 1]
                    wa = w2 * self.A[ai]
                    wb = w2 - wa

                    W[1, ai] = wa
                    W[2, ai] = wb
                    strr += (numpy.dot(wb, strm[j]) +
                             numpy.dot(wa, strm[j + 1]))
                    rm += (numpy.dot(wb, rmtm[j]) + numpy.dot(wa, rmtm[j + 1]))

                if strr > 1: strr = 1

                if self.tau > 0.:
                    ugas = sfr(self.ta, self.tau)[ai]
                elif self.tau < 0.:
                    ugas = sfr(self.ta, self.tau)[ai] / sfr(
                        max(self.ta), self.tau)
                    #ugas = 1.
                #Processed gas = gas formed into stars - mass in stars - remnants
                prgas[ai] = 1 - ugas - strr - rm
                if prgas[ai] < 0.: prgas[ai] = 0

                #print prgas[ai]
                URr[ai] = ugas
                PRr[ai] = prgas[ai]
                RMr[ai] = rm
                Tr[ai] = simps(sfr(numpy.sort(tp) / 1.e9, self.tau),
                               numpy.sort(tp))

                STR[ai] = strr
                if self.tau > 0:
                    SFR[ai] = (1 + epsilon * prgas[ai]) * sfr(
                        self.ta, self.tau)[ai] / norma
                elif self.tau < 0:
                    SFR[ai] = sfr(self.ta[ai], self.tau) / norma
                #print SFR[ai,ti,mi]
                #self.SN = float(snn)
                SFR[ai] /= STR[ai]
        """
        SECTION 3
        Finally, for each tauv/tau/tg combination, perform a weighted
        sum of the S.S.params. spectral energy distribution 'sed1' to obtain the
        model S.E.D. 'y'. Add each record to the SED array.
        """

        sed1 = sed * ATT  #dust-attenuated SED

        ai = tgi
        y = numpy.zeros([1, self.iw])
        y_nodust = numpy.zeros([1, self.iw])
        j = self.J[ai]

        w1 = W[0, ai]
        wa = W[1, ai]
        wb = W[2, ai]

        for i in range(ai):
            y += (w1[i] * sed1[:, i])
            y_nodust += (w1[i] * sed[:, i])

        for i in range(len(wb)):
            y += (wb[i] * sed1[:, j[i]] + wa[i] * sed1[:, j[i] + 1])
            y_nodust += (wb[i] * sed[:, j[i]] + wa[i] * sed[:, j[i] + 1])

        Nly = self.calc_lyman(self.wave, numpy.nan_to_num(y_nodust[0]))
        #print Nly
        if Nly > 0.:
            Nlyman = numpy.log10(Nly)
        else:
            Nlyman = 0.

        total = (self.neb_cont * self.inc_cont) + self.neb_hlines + (
            self.neb_metal[:, neb_z] * self.inc_met)
        total *= 2.997925e18 / (self.wave**2)  #Convert to Flambda
        total *= (Nly * (1 - self.fesc))

        y += total

        Nly = self.calc_lyman(self.wave, numpy.nan_to_num(y[0] / STR[ai]))
        #print Nly
        self.fesc_tot = (self.fesc * Nly) / 10**Nlyman
        if Nly > 0.:
            Nlyman_final = numpy.log10(Nly) + 33. + numpy.log10(3.826)
            if self.fesc > 0.:
                Nlyman_final = numpy.log10(10**Nlyman_final * self.fesc)
            elif self.fesc == 0:
                Nlyman_final = 0.
        else:
            Nlyman_final = 0.

        beta = self.calc_beta(self.wave, y[0])
        #print ai,ai1
        #print STR[ai1,ti,mi]
        SED[:] = y / STR[ai]  #normalised to 1 solar mass
        norm = simps(
            numpy.exp(-1 *
                      numpy.logspace(0, numpy.log10(self.ta[tgi]), 10000) /
                      self.tau),
            numpy.logspace(0, numpy.log10(self.ta[tgi]), 10000))

        STR = STR[tgi]
        SFR = SFR[tgi]

        self.SED = SED
        self.SFR = SFR / STR
        self.STR = STR
        self.beta = beta
        self.Nly = Nlyman_final
        self.Ms = 1.
Exemplo n.º 57
0
 def _volume_differential_comoving(self,z_low,z_upp,N=100):
     z_arr = np.linspace(z_low, z_upp, N)
     dVC = self.cosmo.differential_comoving_volume(z_arr).to(u.Mpc**3 / u.deg**2).value
     return simps(dVC,z_arr)
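# A standalone sketch of the same calculation with an explicit astropy
# cosmology (Planck15 is only an illustrative choice for self.cosmo):
import numpy as np
from astropy import units as u
from astropy.cosmology import Planck15
from scipy.integrate import simps

z_arr = np.linspace(1.0, 1.1, 100)
dVC = Planck15.differential_comoving_volume(z_arr).to(u.Mpc**3 / u.deg**2).value
print(simps(dVC, z_arr))   # comoving volume per square degree between z = 1.0 and 1.1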
Exemplo n.º 58
0
erange = (energy[0], energy[-1])
emask = (energy >= emin) & (energy <= emax)  # boolean mask mapping energies onto the DOS window

list_pup = [l.split()[3] for l in open(file, 'rb')]
list_pup.pop(0)
p_up = np.array([float(i) for i in list_pup])  # extract p_up
list_pdown = [l.split()[4] for l in open(file, 'rb')]
list_pdown.pop(0)
p_down = np.array([float(i) for i in list_pdown])  # extract p_down

x = energy[emask]
y1 = p_up[emask]
y2 = p_down[emask]

pbc_up = simps(y1 * x, x) / simps(y1, x)
pbc_down = simps(y2 * x, x) / simps(y2, x)
pbc = []
pbc.append(pbc_up)
pbc.append(pbc_down)

###### calculaton of d-band center ######

## same energy set ##

list_dup = [l.split()[5] for l in open(file, 'rb')]
list_dup.pop(0)
d_up = np.array([float(i) for i in list_dup])  # extract d_up
list_ddown = [l.split()[6] for l in open(file, 'rb')]
list_ddown.pop(0)
d_down = np.array([float(i) for i in list_ddown])  # extract d_down
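# A sketch of the d-band-centre step, mirroring the p-band computation above
# (y3, y4 and dbc are illustrative names, not from the original script):
y3 = d_up[emask]
y4 = d_down[emask]
dbc_up = simps(y3 * x, x) / simps(y3, x)
dbc_down = simps(y4 * x, x) / simps(y4, x)
dbc = [dbc_up, dbc_down]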
Exemplo n.º 59
0
g = pyrads.SetupGrids.make_grid(Ts,
                                Tstrat,
                                N_press,
                                wavenr_min,
                                wavenr_max,
                                dwavenr,
                                params,
                                RH=params.RH)

# compute optical thickness:
#   -> this is the computationally most intensive step
g.tau, g.omega = pyrads.OpticalThickness.compute_tau_omega_H2ON2(g.p,
                                                                 g.T,
                                                                 g.q,
                                                                 g,
                                                                 params,
                                                                 RH=params.RH)

# compute Planck functions etc:
#   -> here: fully spectrally resolved!
T_2D = np.tile(g.T, (g.Nn, 1)).T  # [press x wave]
g.B_surf = np.pi * pyrads.Planck.Planck_n(g.n, Ts)  # [wave]
g.B = np.pi * pyrads.Planck.Planck_n(g.wave, T_2D)  # [press x wave]

# compute OLR etc:
olr_spec = pyrads.Get_Fluxes.Fplus_alternative(
    0, g)  # (spectrally resolved=irradiance)
olr = simps(olr_spec, g.n)

print("OLR = ", olr)
Exemplo n.º 60
0
 def overlapArr(self, arr1, arr2):
     tempoverlap = np.zeros(self.n)
     for i in range(self.n):
         psi_comb = np.real(np.conjugate(arr1[i]) * arr2[i])
         tempoverlap[i] = simps(psi_comb, self.x)
     return tempoverlap
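# A standalone sketch of the same overlap integral for two 1D states sampled on
# a common grid (harmonic-oscillator eigenstates, which are orthonormal):
import numpy as np
from scipy.integrate import simps

x = np.linspace(-10.0, 10.0, 1001)
psi1 = np.exp(-x**2 / 2.0) / np.pi**0.25     # ground state
psi2 = np.sqrt(2.0) * x * psi1               # first excited state
print(simps(np.conjugate(psi1) * psi1, x))   # ~1 (normalization)
print(simps(np.conjugate(psi1) * psi2, x))   # ~0 (orthogonality)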