Example #1
import pylab as pl

def calculate_boxplot_stats(x, **kwargs):
    whis = kwargs.setdefault('whis', 1.5)
    bootstrap = kwargs.setdefault('bootstrap', None)

    # Get median and quartiles
    q1,med,q3 = pl.prctile (x, [25,50,75] )
    # Get high extreme
    iq = q3-q1
    hi_val = q3+whis*iq
    wisk_hi = pl.compress ( x<=hi_val, x )
    if len(wisk_hi)==0:
        wisk_hi = q3
    else:
        wisk_hi = max(wisk_hi)
    # Get low extreme
    lo_val = q1-whis*iq
    wisk_lo = pl.compress ( x>=lo_val, x )
    if len(wisk_lo)==0:
        wisk_lo = q1
    else:
        wisk_lo = min(wisk_lo)

    # Get fliers
    flier_hi = pl.compress ( x>wisk_hi, x )
    flier_lo = pl.compress ( x<wisk_lo, x )

    if bootstrap is not None:
        # Do a bootstrap estimate of notch locations
        def bootstrapMedian ( data, N=5000 ):
            # determine 95% confidence intervals of the median
            M = len(data)
            percentile = [2.5,97.5]
            estimate = pl.zeros(N)
            for n in xrange (N):
                bsIndex = pl.randint ( 0, M, M )
                bsData = data[bsIndex]
                estimate[n] = pl.prctile ( bsData, 50 )
            CI = pl.prctile ( estimate, percentile )
            return CI
        CI = bootstrapMedian ( x, N=bootstrap )
        notch_max = CI[1]
        notch_min = CI[0]
    else:
        # Estimate notch locations using Gaussian-based asymptotic
        # approximation
        #
        # For discussion: McGill, R., Tukey, J.W., and
        # Larsen, W.A. (1978) "Variations of Boxplots", The
        # American Statistician, 32:12-16
        notch_max = med + 1.57*iq/pl.sqrt(len(x))
        notch_min = med - 1.57*iq/pl.sqrt(len(x))
    return {'main':(wisk_lo,q1,med,q3,wisk_hi),
            'fliers':(flier_lo,flier_hi),
            'notch':(notch_min,notch_max)}
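
A minimal usage sketch of the function above, assuming an older matplotlib/pylab environment (pl.prctile in particular was removed from recent matplotlib releases); the sample data are made up:

import pylab as pl

x = pl.randn(200)                                   # synthetic sample
stats = calculate_boxplot_stats(x, whis=1.5, bootstrap=1000)
wisk_lo, q1, med, q3, wisk_hi = stats['main']
print 'median %.3f, IQR %.3f, notch %s' % (med, q3 - q1, str(stats['notch']))
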
Example #2
import pylab as P

def x_is_okay(x, xvec):
    """Make sure "x" and "xvec" satisfy the conditions for running the pchip interpolator."""

    n = len(x)
    m = len(xvec)

    xx = x.copy()                                                               # Make sure "x" is in sorted order (brute force, but works...)
    xx.sort()
    total_matches = (xx == x).sum()
    if total_matches != n:
        print "*" * 50
        print "x_is_okay()"
        print "x values weren't in sorted order --- aborting"
        return False

    delta = x[1:] - x[:-1]                                                      # Make sure 'x' doesn't have any repeated values
    if (delta == 0.0).any():
        print "*" * 50
        print "x_is_okay()"
        print "x values weren't monotonic--- aborting"
        return False

    check = xvec > x[-1]                                                        # Check for xvec values beyond the upper edge of 'x'
    if check.any():
        print "*" * 50
        print "x_is_okay()"
        print "Certain 'xvec' values are beyond the upper end of 'x'"
        print "x_max = ", x[-1]
        indices = P.compress(check, range(m))
        print "out-of-range xvec's = ", xvec[indices]
        print "out-of-range xvec indices = ", indices
        return False

    check = xvec < x[0]                                                         # Check for xvec values beyond the lower edge of 'x'
    if check.any():
        print "*" * 50
        print "x_is_okay()"
        print "Certain 'xvec' values are beyond the lower end of 'x'"
        print "x_min = ", x[0]
        indices = P.compress(check, range(m))
        print "out-of-range xvec's = ", xvec[indices]
        print "out-of-range xvec indices = ", indices
        return False

    return True
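
A small usage sketch of the check above; it assumes x_is_okay is in scope and that P is pylab, as in the function body:

import pylab as P

x = P.array([0.0, 1.0, 2.0, 3.0])        # knots: sorted, no repeats
xvec = P.array([0.5, 1.5, 2.5])          # evaluation points inside [x[0], x[-1]]
if x_is_okay(x, xvec):
    print "inputs are valid for the pchip interpolator"
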
Example #3
def id_uniq_qm_px(level, qm_levels, idx_table, CUTOFF, DE, OFILE4):
    """ For a given level [Level|State]
        find the QM level and index of photoionisation table associated and return photoionization table.
    """
    EV_TO_CM = Cst.Q / 100.0 / Cst.H / Cst.C

    # Identification by configuration and term
    qm_cfgterm_list = [qm_level.cfg + " " + qm_level.term for qm_level in qm_levels]
    res = dl.get_close_matches(level.cfg + " " + str(level.term), qm_cfgterm_list, n=1, cutoff=CUTOFF)
    try:
        idx = qm_cfgterm_list.index(res[0])
    except IndexError:
        return None, None, None

    # Check that the theoretical and experimental energies are not too different
    qm_e = qm_levels[idx].e / EV_TO_CM
    exp_e = level.e / EV_TO_CM
    if abs(qm_e - exp_e) > DE:
        # print('Warning: energy drift >', DE, ' eV:', level.cfg, level.term, ' '.join(res), ' (i.e. ID not reliable)')
        return None, None, None

    # Find indexes useful for direct access of the binary photoionization file
    try:
        i, nmin, ntot, m, l, p, pos = idx_table[idx]
    except IndexError:
        return None, None, None

    # Case where the level exists in TOPBASE but there is no photoionization table
    if int(ntot) == 0:
        return None, None, None

    # Extract photoionization from binary file by direct access
    e_ryd0, x_Mb0 = extract_px(OFILE4, i, ntot)
    e_eV0 = e_ryd0 * 13.6057  # energy in eV

    # Exclude null values
    e_eV = pl.compress(x_Mb0 != 0.0, e_eV0)
    x_Mb = pl.compress(x_Mb0 != 0.0, x_Mb0)

    return idx, e_eV, x_Mb
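
The final filtering step keeps only points with a non-zero cross-section. With plain NumPy arrays the same selection can be written with boolean indexing instead of pl.compress; a tiny sketch with made-up values:

import numpy as np

x_Mb0 = np.array([0.0, 1.2, 0.0, 3.4])        # cross-sections (Mb), some null
e_eV0 = np.array([10.0, 11.0, 12.0, 13.0])    # corresponding energies (eV)
keep = x_Mb0 != 0.0
e_eV, x_Mb = e_eV0[keep], x_Mb0[keep]         # same result as pl.compress(keep, ...)
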
Example #4
def completeness():#measure completeness on final image
	
    #combinepath=bcdpath+'pbcd/Combine/output_apex_step2'
    combinepath=bcdpath+'pbcd/Combine/apex_1frame_step2'
    os.chdir(combinepath)

    file='mosaic_extract_final.tbl'
    input=open(file,'r')
    xgal=[]#positions of previous detections with snr > 3
    ygal=[]
    fap4gal=[]
    for line in input:
	if line.find('#') > -1: #skip lines with '#' in them
	    continue
	if line.find('\\') > -1: #skip lines with '#' in them
	    continue
	if line.find('|') > -1: #skip lines with '#' in them
	    continue
	t=line.split()
	xgal.append(float(t[8]))
	ygal.append(float(t[10]))
	fap4gal.append(float(t[28]))
    input.close()
    xgal=N.array(xgal,'f')
    ygal=N.array(ygal,'f')


    fsimall=[]
    matchflagsimall=[]
    f2all=[]
    f3all=[]
    f4all=[]
    deblendsimall=[]
    snrsimall=[]

    myminmag=24.75
    mymaxmag=27.4
    myfmin=10.**((25.-mymaxmag)/2.5)#ZP=25
    myfmax=10.**((25.-myminmag)/2.5)#ZP=25


    #below is loop to create image w/artificial sources, extract, and compare

    for k in range(100):
	    createflag=1.#create image w/artificial sources?
	    detectflag=1.#detect sources in image?
	    if createflag > 0.1:
		    xnoise=[]
		    ynoise=[]
		    infile=open('noisecoords.dat','r')
		    for line in infile:
			    t=line.split()
			    xnoise.append(float(t[0]))
			    ynoise.append(float(t[1]))
	    infile.close()
	
	
	    nstar=10
    
	    xsim=N.zeros(nstar,'d')
	    ysim=N.zeros(nstar,'d')
	    msim=N.zeros(nstar,'d')
	    outfile=open('stars.coords.dat','w')
	    for i in range(nstar):
	    #j=int(round(1.*len(xnoise)*random.uniform(0,1)))

	    #xsim[i]=xnoise[j]
	    #ysim[i]=ynoise[j]
		    j=0
		    for j in range(10000):
			    xt=int(round(random.uniform(5.,125.)))
			    yt=int(round(random.uniform(5.,140.)))
			    d=pylab.sqrt((xt-xgal)**2+(yt-ygal)**2)#make sure sim galaxies are not near real galaxies
			    if min(d) > -1.:
				    d2=pylab.sqrt((xt-xsim)**2+(yt-ysim)**2)#make sure sim points are not on top of each other
				    if min(d2) > 5.:
					    print i,'got a good point after ',j,' tries',xt,yt
					    break
				    j=j+1
		    xsim[i]=xt
		    ysim[i]=yt
		    k=random.uniform(myfmin,myfmax)
		    msim[i]=25.-2.5*pylab.log10(k)
	    #print k,msim[i] 
		    s='%8.2f %8.2f %8.2f \n' % (xsim[i],ysim[i],msim[i])
		    outfile.write(s)
	    outfile.close()
	      
	
	#os.system('rm stars.coords.dat')
	#iraf.starlist('stars.coords.dat',nstars=100,spatial='uniform',xmax=130,ymax=145,luminosity='uniform',minmag=22.,maxmag=30.0,mzero=22.0,sseed='INDEF',power=0.6,alpha=0.74,lseed='INDEF')
	
    
	    os.system('rm mosaic-completeness.fits')
        #iraf.mkobjects(input='mosaic_minus_median_extract.fits',output='mosaic-completeness.fits',objects='stars.coords.dat',radius=1.13,magzero=25.,background=0.,gain=5.,rdnoise=0.,poisson='no')#don't convolve w/PRF
	    #os.system('cp ../cal/MIPS24_PRF_HD_center.fits .')#convolve star w/SSC PRF
	    os.system('cp ../cal/mips24_prf_mosaic_2.45_4x.fits .')#convolve star w/SSC PRF
	    iraf.mkobjects(input='mosaic_minus_median_extract.fits',output='mosaic-completeness.fits',objects='stars.coords.dat',radius=14,star='mips24_prf_mosaic_2.45_4x.fits',magzero=25.,background=0.,gain=5.,rdnoise=0.,poisson='no')
        #os.system('cp ../cal/PRF_estimate.fits .')#convolve gaussian w/measured PRF
        #iraf.mkobjects(input='mosaic_minus_median_extract.fits',output='mosaic-completeness.fits',objects='stars.coords.dat',radius=15,star='PRF_estimate.fits',magzero=25.,background=0.,gain=5.,rdnoise=0.,poisson='no')
	    os.system('ls *.fits')
	    os.system('pwd')
	    iraf.display('mosaic_minus_median_extract.fits',1,contrast=0.01)
	    iraf.display('mosaic-completeness.fits',2,contrast=0.01)
	    iraf.tvmark(1,'stars.coords.dat')
	    iraf.tvmark(2,'stars.coords.dat')
	    fsim=10.**((25.-msim)/2.5)#ZP=25

	    if createflag < .1:#read in positions and magnitudes of artdata sources
		    xsim=[]
		    ysim=[]
		    msim=[]
		    infile=open('stars.coords.dat','r')
		    for line in infile:
			    if line.find('#') > -1:
				    continue
			    t=line.split()
			    xsim.append(float(t[0]))
			    ysim.append(float(t[1]))
			    msim.append(float(t[2]))
		    infile.close()
		    xsim=N.array(xsim,'f')
		    ysim=N.array(ysim,'f')
		    msim=N.array(msim,'f')
		    
		    fsim=10.**((25.-msim)/2.5)#ZP=25

	    if detectflag > 0.1:#now run detection on mosaic-completeness.fits
		    combinepath=bcdpath+'pbcd/Combine/'
		    os.chdir(combinepath)
		    print combinepath
		    #os.system('apex_1frame.pl -n apex_1frame_MIPS24_step2.nl -i output_apex_step2/mosaic-completeness.fits')
	
		    #combinepath=bcdpath+'pbcd/Combine/output_apex_step2'
		    os.system('apex_1frame.pl -n apex_1frame_step2all.nl -i apex_1frame_step2/mosaic-completeness.fits')
	
		    combinepath=bcdpath+'pbcd/Combine/apex_1frame_step2'
		    os.chdir(combinepath)
		    print combinepath
		    file='mosaic-completeness_extract_raw.tbl'
		    input=open(file,'r')
		    ngal=0
		    for line in input:
			    if line.find('Conversion') > -1:
				    t=line.split('=')
				    convfactor=float(t[1])#conversion from ADU to uJy
  	    #aperr=aveaperr*convfactor #convert noise in ADU to uJy using conv factor from apex
				    print "Conversion Factor = ",convfactor
	    #print "aveaperr = ",aveaperr
	    #print "aperr = ",aperr
				    continue
			    if line.find('#') > -1: #skip lines with '#' in them
				    continue
			    if line.find('\\') > -1: #skip lines with '#' in them
				    continue
			    if line.find('|') > -1: #skip lines with '#' in them
				    continue
			    ngal=ngal+1
		    input.close()
    
	

	    id24 = N.zeros(ngal,'f')
	    imagex24 = N.zeros(ngal,'f')
	    imagey24  = N.zeros(ngal,'f')
	    ra24 = N.zeros(ngal,'f')
	    dec24 = N.zeros(ngal,'f')
	    f24 = N.zeros(ngal,'d')#flux
	    errf24 = N.zeros(ngal,'d')
	    fap1 = N.zeros(ngal,'d')#flux in aperture 1 (1,1.5,2,2.6,3,3.5,4,4.5,5.,5.5) pixels
	    fap2 = N.zeros(ngal,'d')#flux
	    fap3 = N.zeros(ngal,'d')#flux
	    fap4 = N.zeros(ngal,'d')#flux in ap 4 - this is one w/ap cor of 1.67 (Calzetti et al 2007)
	    fap5 = N.zeros(ngal,'d')#flux
	    fap6 = N.zeros(ngal,'d')#flux
	    fap7 = N.zeros(ngal,'d')#flux
	    fap8 = N.zeros(ngal,'d')#flux
	    fap9 = N.zeros(ngal,'d')#flux
	    fap10 = N.zeros(ngal,'d')#flux
	    snr24 = N.zeros(ngal,'d')#SNR calculated by mopex
	    deblend = N.zeros(ngal,'f')#SNR calculated by mopex
	    

	    input=open(file,'r')
	    i=0
	    output=open('xy24raw.dat','w')
	    for line in input:
		    if line.find('#') > -1: #skip lines with '#' in them
			    continue
		    if line.find('\\') > -1: #skip lines with '#' in them
			    continue
		    if line.find('|') > -1: #skip lines with '#' in them
			    continue
	 
	
		    t=line.split()
	#print "length of t = ",len(t)
	#print (t[8]),(t[10]),(t[13]),(t[14]),(t[18]),(t[2]),(t[23]),(t[24]),(t[25]),(t[26]),(t[27]),(t[28]),(t[29]),(t[30]),(t[31]),(t[32])

		    (imagex24[i],imagey24[i],f24[i],errf24[i],snr24[i],deblend[i],fap1[i],fap2[i],fap3[i],fap4[i],fap5[i],fap6[i],fap7[i],fap8[i],fap9[i],fap10[i])=(float(t[8]),float(t[10]),float(t[13]),float(t[14]),float(t[18]),float(t[2]),float(t[25]),float(t[26]),float(t[27]),float(t[28]),float(t[29]),float(t[30]),float(t[31]),float(t[32]),float(t[33]),float(t[34]))
		    s='%6.2f %6.2f \n'%(imagex24[i],imagey24[i])
		    output.write(s)

		    i=i+1
	    input.close()#44 -> 43
	    output.close()
	    iraf.tvmark(1,'xy24raw.dat',color=204,radi=2)
	    iraf.tvmark(2,'xy24raw.dat',color=204,radi=2)
    
	    delta=1.#max number of pixels for a match

	    # get rid of objects that were detected in the original image.  Otherwise, matching will think any object near a sim galaxy is the sim galaxy.  A faint galaxy placed on top of a pre-existing bright galaxy will be detected.

            newgalflag=N.ones(len(imagex24),'i')
	    for i in range(len(imagex24)):
		    (imatch, matchflag,nmatch)=findnearest(imagex24[i],imagey24[i],xgal,ygal,delta)
		    if matchflag > 0.:
			    dflux=abs(fap4gal[imatch] - fap4[i])/fap4[i]
			    if dflux < .1:#position of real galaxy, flux difference less than 10% -> not a new galaxy
				    newgalflag[i] = 0
	    #keep only galaxies that are new
	    imagex24 = N.compress(newgalflag,imagex24)
	    imagey24  = N.compress(newgalflag,imagey24)
	    fap1 = N.compress(newgalflag,fap1)
	    fap2 = N.compress(newgalflag,fap2)
	    fap3 = N.compress(newgalflag,fap3)
	    fap4 = N.compress(newgalflag,fap4)
	    fap5 = N.compress(newgalflag,fap5)
	    fap6 = N.compress(newgalflag,fap6)
	    fap7 = N.compress(newgalflag,fap7)
	    fap8 = N.compress(newgalflag,fap8)
	    fap9 = N.compress(newgalflag,fap9)
	    fap10 =N.compress(newgalflag,fap10)
	    snr24 =N.compress(newgalflag,snr24)
	    deblend = N.compress(newgalflag,deblend)

	    delta=2.#max number of pixels for a match
	    matchflagsim=N.zeros(len(xsim),'i')
	    fmeas1=N.zeros(len(xsim),'f')
	    fmeas2=N.zeros(len(xsim),'f')
	    fmeas3=N.zeros(len(xsim),'f')
	    fmeas4=N.zeros(len(xsim),'f')
	    fmeas5=N.zeros(len(xsim),'f')
	    fmeas6=N.zeros(len(xsim),'f')
	    fmeas7=N.zeros(len(xsim),'f')
	    fmeas8=N.zeros(len(xsim),'f')
	    fmeas9=N.zeros(len(xsim),'f')
	    fmeas10=N.zeros(len(xsim),'f')
	    fmeas24=N.zeros(len(xsim),'f')
	    deblendsim=N.zeros(len(xsim),'f')
	    snrsim=N.zeros(len(xsim),'f')
	    for i in range(len(xsim)):
		    (imatch, matchflag,nmatch)=findnearest(xsim[i],ysim[i],imagex24,imagey24,delta)
		    matchflagsim[i]=matchflag
		    if matchflag > .1:
			    fmeas1[i]=fap1[int(imatch)]
			    fmeas2[i]=fap2[int(imatch)]
			    fmeas3[i]=fap3[int(imatch)]
			    fmeas4[i]=fap4[int(imatch)]
			    fmeas5[i]=fap5[int(imatch)]
			    fmeas6[i]=fap6[int(imatch)]
			    fmeas7[i]=fap7[int(imatch)]
			    fmeas8[i]=fap8[int(imatch)]
			    fmeas9[i]=fap9[int(imatch)]
			    fmeas10[i]=fap10[int(imatch)]
			    fmeas24[i]=f24[int(imatch)]
			    deblendsim[i]=deblend[int(imatch)]
			    snrsim[i]=snr24[int(imatch)]
			    



	    fsimall=fsimall+list(fsim)
	    matchflagsimall=matchflagsimall+list(matchflagsim)
	    f2all=f2all+list(fmeas2)
	    f3all=f3all+list(fmeas3)
	    f4all=f4all+list(fmeas4)
	    deblendsimall=deblendsimall+list(deblendsim)
	    snrsimall=snrsimall+list(snrsim)


    fsim=N.array(fsimall,'f')
    matchflagsim=N.array(matchflagsimall,'f')
    fmeas2=N.array(f2all,'f')
    fmeas3=N.array(f3all,'f')
    fmeas4=N.array(f4all,'f')
    deblendsim=N.array(deblendsimall,'f')
    snrsim=N.array(snrsimall,'f')


    #make plots using all realizations 
    pylab.cla()
    pylab.clf()
    fsim=fsim*convfactor
    fs=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fsim)
    #f1=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas1)
    f2=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas2)
    f3=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas3)
    f4=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas4)
    #f242=pylab.compress((matchflagsim > 0.1) & (deblendsim < 1.5),fmeas24)

    r4=pylab.median(fs/f4)
    r3=pylab.median(fs/f3)
    r2=pylab.median(fs/f2)
    print "average ratios ap 4",pylab.average(fs/f4),r4,pylab.std((fs/f4)/pylab.average(fs/f2))
    print "average ratios ap 3",pylab.average(fs/f3),pylab.median(fs/f3),pylab.std((fs/f3)/pylab.average(fs/f3))
    print "average ratios ap 2",pylab.average(fs/f2),pylab.median(fs/f2),pylab.std((fs/f2)/pylab.average(fs/f2))

    s='f4 w/apcor = %3.2f(%4.2f)'%(r4,pylab.average(abs(fs-f4*r4)/fs))
    pylab.plot(fs,f4*r4,'b.',label=s)
    pylab.plot(fs,f4,'bo',label='f4')
    s='f3 w/apcor = %3.2f(%4.2f)'%(r3,pylab.average(abs(fs-f3*r3)/fs))
    pylab.plot(fs,f3*r3,'g.',label=s)
    pylab.plot(fs,f3,'go',label='f3')
    s='f2 w/apcor = %3.2f(%4.2f)'%(r2,pylab.average(abs(fs-f2*r2)/fs))
    pylab.plot(fs,f2*r2,'r.',label=s)
    pylab.plot(fs,f2,'ro',label='f2')
    #pylab.plot(fs,f1,'co',label='f1')
    #pylab.plot(fs,f242,'k.',label='f24')
    pylab.legend(loc='best')
    x=N.arange(0.,max(fs),10.)
    y=x
    pylab.plot(x,y,'k-')
    #y=2.*x
    #pylab.plot(x,y,'k--')
    #y=3.*x
    #pylab.plot(x,y,'k--')
    #y=4.*x
    #pylab.plot(x,y,'k--')
    #y=5.*x
    #pylab.plot(x,y,'k--')
    pylab.xlabel('F(24) Input')
    pylab.ylabel('F(24) measured')
    #pylab.axis([0.,50.,0.,50.])
    s=str(prefix)+'fluxcomp.eps'
    pylab.savefig(s)

    pylab.cla()
    pylab.clf()


    nbins=20
    fmin=10.#min(fsim)
    fmax=max(fsim)
    df=5.#(fmax-fmin)/(1.*nbins)
    bins=N.arange(fmin,(fmax+df),df)



    (xbin,ybin,ybinerr)=mystuff.completeness(bins,fsim,matchflagsim)
    s=str(prefix)+'FracComplvsFlux.dat'
    outdat=open(s,'w')
    print "Completeness vs Input Flux"
    for i in range(len(xbin)):
	    print i, xbin[i],ybin[i],ybinerr[i]
	    t='%8.2f %8.4f %8.4f\n'%(xbin[i],ybin[i],ybinerr[i])
	    outdat.write(t)
    outdat.close()
    #for i in range(len(fsim)):
	#if snrsim[i] > 3.:
	#    print i, fsim[i],matchflagsim[i],deblendsim[i],abs(fsim[i]-fmeas4[i]*1.67)/fsim[i],snrsim[i]
    #(xbin,ybin2,ybin2err)=mystuff.scipyhist2(bins,fmeas4)
    #pylab.plot(xbin,ybin,'bo')
    #pylab.plot(xbin,ybin2,'ro')
    #s=str(prefix)+'NDetectvsFlux.eps'
    #pylab.savefig(s)

    pylab.cla()
    pylab.clf()
    pylab.plot(xbin,ybin,'ko')
    pylab.errorbar(xbin,ybin,yerr=ybinerr,fmt=None,ecolor='k')
    s=str(prefix)+'FracComplvsFlux.eps'
    pylab.axhline(y=1.0,ls='-')
    pylab.axhline(y=.8,ls='--')
    pylab.axvline(x=80.0,ls=':',color='b')
    pylab.xlabel('Input Flux (uJy)')
    pylab.ylabel('Completeness')
    pylab.axis([0.,max(xbin)+df,-.05,1.05])

    pylab.savefig(s)
    
    os.system('cp *.eps /Users/rfinn/clusters/spitzer/completeness/.')
    os.system('cp *vsFlux.dat /Users/rfinn/clusters/spitzer/completeness/.')
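
The completeness curve itself comes from mystuff.completeness, which is not shown here. A minimal sketch of the kind of binning such a helper presumably performs (recovered fraction of simulated sources per input-flux bin, with a simple binomial error bar) is given below; the function name and error model are assumptions, not the original implementation:

import numpy as np

def completeness_vs_flux(bins, fsim, matchflag):
    # fraction of simulated sources recovered in each input-flux bin
    xbin, ybin, ybinerr = [], [], []
    for lo, hi in zip(bins[:-1], bins[1:]):
        inbin = (fsim >= lo) & (fsim < hi)
        ntot = inbin.sum()
        if ntot == 0:
            continue
        frac = (matchflag[inbin] > 0.1).sum() / float(ntot)
        xbin.append(0.5 * (lo + hi))
        ybin.append(frac)
        ybinerr.append(np.sqrt(frac * (1. - frac) / ntot))   # binomial error
    return np.array(xbin), np.array(ybin), np.array(ybinerr)
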
Example #5
    def drawmeridians(self,meridians,color='k',linewidth=1., \
                      linestyle='--',dashes=[1,1],labels=[0,0,0,0],\
                      font='rm',fontsize=12):
        """
 draw meridians (longitude lines).

 meridians - list containing longitude values to draw (in degrees).
 color - color to draw meridians (default black).
 linewidth - line width for meridians (default 1.)
 linestyle - line style for meridians (default '--', i.e. dashed).
 dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
  1 pixel off).
 labels - list of 4 values (default [0,0,0,0]) that control whether
  meridians are labelled where they intersect the left, right, top or 
  bottom of the plot. For example labels=[1,0,0,1] will cause meridians
  to be labelled where they intersect the left and bottom of the plot,
  but not the right and top. Labels are drawn using mathtext.
 font - mathtext font used for labels ('rm','tt','it' or 'cal', default 'rm').
 fontsize - font size in points for labels (default 12).
        """
        # get current axes instance.
        ax = pylab.gca()
        # don't draw meridian points poleward of latmax.
        latmax = 80. # not used for cyl, merc projections.
        # offset for labels.
        yoffset = (self.urcrnry-self.llcrnry)/100./self.aspect
        xoffset = (self.urcrnrx-self.llcrnrx)/100.

        if self.projection not in ['merc','cyl']:
            lats = pylab.arange(-latmax,latmax+1).astype('f')
        else:
            lats = pylab.arange(-90,91).astype('f')
        xdelta = 0.1*(self.xmax-self.xmin)
        ydelta = 0.1*(self.ymax-self.ymin)
        for merid in meridians:
            lons = merid*pylab.ones(len(lats),'f')
            x,y = self(lons,lats)
            # remove points outside domain.
            testx = pylab.logical_and(x>=self.xmin-xdelta,x<=self.xmax+xdelta)
            x = pylab.compress(testx, x)
            y = pylab.compress(testx, y)
            testy = pylab.logical_and(y>=self.ymin-ydelta,y<=self.ymax+ydelta)
            x = pylab.compress(testy, x)
            y = pylab.compress(testy, y)
            if len(x) > 1 and len(y) > 1:
                # split into separate line segments if necessary.
                # (not necessary for mercator or cylindrical).
                xd = (x[1:]-x[0:-1])**2
                yd = (y[1:]-y[0:-1])**2
                dist = pylab.sqrt(xd+yd)
                split = dist > 500000.
                if pylab.asum(split) and self.projection not in ['merc','cyl']:
                   ind = (pylab.compress(split,pylab.squeeze(split*pylab.indices(xd.shape)))+1).tolist()
                   xl = []
                   yl = []
                   iprev = 0
                   ind.append(len(xd))
                   for i in ind:
                       xl.append(x[iprev:i])
                       yl.append(y[iprev:i])
                       iprev = i
                else:
                    xl = [x]
                    yl = [y]
                # draw each line segment.
                for x,y in zip(xl,yl):
                    # skip if only a point.
                    if len(x) > 1 and len(y) > 1:
                        l = Line2D(x,y,linewidth=linewidth,linestyle=linestyle)
                        l.set_color(color)
                        l.set_dashes(dashes)
                        ax.add_line(l)
        # draw labels for meridians.
        # search along edges of map to see if meridians intersect.
        # if so, find x,y location of intersection and draw a label there.
        if self.projection == 'cyl':
            dx = 0.01; dy = 0.01
        else:
            dx = 1000; dy = 1000
        for dolab,side in zip(labels,['l','r','t','b']):
            if not dolab: continue
            # for cyl or merc, don't draw meridians on left or right.
            if self.projection in ['cyl','merc'] and side in ['l','r']: continue
            if side in ['l','r']:
	        nmax = int((self.ymax-self.ymin)/dy+1)
                if self.urcrnry < self.llcrnry:
	            yy = self.llcrnry-dy*pylab.arange(nmax)
                else:
	            yy = self.llcrnry+dy*pylab.arange(nmax)
                if side == 'l':
	            lons,lats = self(self.llcrnrx*pylab.ones(yy.shape,'f'),yy,inverse=True)
                else:
	            lons,lats = self(self.urcrnrx*pylab.ones(yy.shape,'f'),yy,inverse=True)
                lons = pylab.where(lons < 0, lons+360, lons)
                lons = [int(lon*10) for lon in lons.tolist()]
                lats = [int(lat*10) for lat in lats.tolist()]
            else:
	        nmax = int((self.xmax-self.xmin)/dx+1)
                if self.urcrnrx < self.llcrnrx:
	            xx = self.llcrnrx-dx*pylab.arange(nmax)
                else:
	            xx = self.llcrnrx+dx*pylab.arange(nmax)
                if side == 'b':
	            lons,lats = self(xx,self.llcrnry*pylab.ones(xx.shape,'f'),inverse=True)
                else:
	            lons,lats = self(xx,self.urcrnry*pylab.ones(xx.shape,'f'),inverse=True)
                lons = pylab.where(lons < 0, lons+360, lons)
                lons = [int(lon*10) for lon in lons.tolist()]
                lats = [int(lat*10) for lat in lats.tolist()]
            for lon in meridians:
                if lon<0: lon=lon+360.
                # find index of meridian (there may be two, so
                # search from left and right).
                try:
                    nl = lons.index(int(lon*10))
                except:
                    nl = -1
                try:
                    nr = len(lons)-lons[::-1].index(int(lon*10))-1
                except:
                    nr = -1
        	if lon>180:
        	    lonlab = r'$\%s{%g\/^{\circ}\/W}$'%(font,pylab.fabs(lon-360))
        	elif lon<180 and lon != 0:
        	    lonlab = r'$\%s{%g\/^{\circ}\/E}$'%(font,lon)
        	else:
        	    lonlab = r'$\%s{%g\/^{\circ}}$'%(font,lon)
                # meridians can intersect each map edge twice.
                for i,n in enumerate([nl,nr]):
                    lat = lats[n]/10.
                    # no meridians > latmax for projections other than merc,cyl.
                    if self.projection not in ['merc','cyl'] and lat > latmax: continue
                    # don't bother if close to the first label.
                    if i and abs(nr-nl) < 100: continue
                    if n >= 0:
                        if side == 'l':
        	            pylab.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',fontsize=fontsize)
                        elif side == 'r':
        	            pylab.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',fontsize=fontsize)
                        elif side == 'b':
        	            pylab.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',fontsize=fontsize)
                        else:
        	            pylab.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',fontsize=fontsize)

        # make sure axis ticks are turned off
        ax.set_xticks([]) 
        ax.set_yticks([])
        # set axes limits to fit map region.
        self.set_axes_limits()
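
The segment-splitting logic above breaks a meridian into separate polylines wherever consecutive points are more than 500 km apart in projection coordinates. A self-contained NumPy sketch of that idea (threshold and arrays are illustrative):

import numpy as np

def split_at_jumps(x, y, max_dist=500000.):
    # split a polyline into segments wherever the point spacing exceeds max_dist
    dist = np.sqrt(np.diff(x)**2 + np.diff(y)**2)
    breaks = np.nonzero(dist > max_dist)[0] + 1
    return np.split(x, breaks), np.split(y, breaks)

x = np.array([0., 1., 2., 1.0e6, 1.0e6 + 1.])
y = np.zeros(5)
xl, yl = split_at_jumps(x, y)       # two segments: the first three points, then the last two
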
Example #6
def select_ll(llfile, lbdmin, lbdmax, lbdrange):
    '''Select lines to show on the plot by vertical lines
    '''

    bla = False

    try:
        llfile = pl.loadtxt(llfile, dtype='str', comments='#', delimiter='\n')
    except IOError:
        print "Linelist file does not exist."
        quit(1)
        #return None, None, None

    elt_ll = [line[0:9] for line in llfile]
    lbd_ll = [float(line[9:18]) for line in llfile]

    try:
        lgf_ll = [float(line[18:]) for line in llfile]
    except:
        lgf_ll = None

    elt_ll = pl.asarray(elt_ll)
    lbd_ll = pl.asarray(lbd_ll)
    lgf_ll = pl.asarray(lgf_ll)

    crit = pl.logical_and(lbd_ll <= lbdmax, lbd_ll >= lbdmin)

    elt_ll_set = pl.compress(crit, elt_ll)
    lbd_ll_set = pl.compress(crit, lbd_ll)

    try:
        lgf_ll_set = pl.compress(crit, lgf_ll)
    except:
        lgf_ll_set = None

    nid = lbd_ll_set.size
    dlbd_ll_set = lbd_ll_set[1:] - lbd_ll_set[0:nid - 1]
    dlbd_mean = pl.mean(dlbd_ll_set)

    if bla:
        print ""
        print "  Number of line identification in the selected range:", nid
        print "  Number of identification/Å:", (nid /
                                                lbdrange).__format__('7.2f')
        print "  Mean interval [Å]:         ", dlbd_mean.__format__('7.2f')

    loggfmin = -1

    while nid > 100:
        crit = lgf_ll_set > loggfmin
        if not pl.any(crit):
            break

        elt_ll_set = pl.compress(crit, elt_ll_set)
        lbd_ll_set = pl.compress(crit, lbd_ll_set)
        lgf_ll_set = pl.compress(crit, lgf_ll_set)

        nid = lbd_ll_set.size

        dlbd_ll_set = lbd_ll_set[1:] - lbd_ll_set[0:nid - 1]
        dlbd_mean = pl.mean(dlbd_ll_set)

        if bla:
            print "Number of line with log gf >", loggfmin, "to display:", nid
            print "Number of identification/Å:", nid / lbdrange
            print "Mean interval [Å]:", dlbd_mean

        loggfmin = loggfmin + 0.2

    return elt_ll_set, lbd_ll_set, dlbd_mean
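
The function performs a two-stage selection: restrict the line list to the requested wavelength window, then raise the log gf cut in 0.2 dex steps until no more than about 100 lines remain. A compact NumPy sketch of that scheme (the arrays and the 100-line cap are illustrative):

import numpy as np

def thin_linelist(lbd, lgf, lbdmin, lbdmax, max_lines=100):
    # keep lines in the wavelength window, then tighten the log gf cut
    keep = (lbd >= lbdmin) & (lbd <= lbdmax)
    lbd, lgf = lbd[keep], lgf[keep]
    loggfmin = -1.0
    while lbd.size > max_lines and (lgf > loggfmin).any():
        keep = lgf > loggfmin
        lbd, lgf = lbd[keep], lgf[keep]
        loggfmin += 0.2
    return lbd, lgf

lbd = np.linspace(4000., 5000., 500)
lgf = np.random.uniform(-5., 1., 500)
lbd_sel, lgf_sel = thin_linelist(lbd, lgf, 4500., 4600.)
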
Example #7
def select_data(i, data, lbdmin, lbdmax, obslist, lbdunitlist, hshiftlist):
    '''Selection of spectrum in a given wavelength range
    '''

    bla = False
    global p

    # Case where wavelengths are not in Angstrom but in nm
    if lbdunitlist[i] in ['a', 'Å', 'A', '', None]:
        pass
    elif lbdunitlist[i] == 'nm':
        data[:, 0] = data[:, 0] * 10
    else:
        print "Wavelength unit unknown. Try Å|A|a or nm."
        quit(1)

    if hshiftlist[i] in [None, '']:
        hshift = 0.0
    else:
        hshift = float(hshiftlist[i])

    data[:, 0] = data[:, 0] + hshift

    crit = pl.logical_and(data[:, 0] <= lbdmax, data[:, 0] >= lbdmin)

    if not crit.any():
        # Case of 1 spectrum only without any idea of wavelength unit
        #if len(obslist) == 1:
        data[:, 0] = data[:, 0] * 10
        crit = pl.logical_and(data[:, 0] <= lbdmax, data[:, 0] >= lbdmin)
        print "wavelenght of the spectrum are outside the selection range."
        print "Maybe wavelengths are in nm ... convert wavelengths in Å and try again..."
        if not crit.any():
            #      print "Wavelength range outside the spectrum."
            #      quit(1)
            # Skip this spectrum
            #else:
            print "Required (or default) wavelength range outside of this spectrum."
            print " Number of wavelength points:", len(
                data[:, 0]).__format__('11g')
            print "  Lambda min:                ", min(data[:, 0] /
                                                       10).__format__('9.3f')
            print "  Lambda max:                ", max(data[:, 0] /
                                                       10).__format__('9.3f')
            return None, None, p

    if bla:
        print " Number of wavelength points:", len(data[:,
                                                        0]).__format__('11g')
        print "  Lambda min:                ", min(data[:,
                                                        0]).__format__('9.3f')
        print "  Lambda max:                ", max(data[:,
                                                        0]).__format__('9.3f')

    x = pl.compress(crit, data[:, 0])
    y = pl.compress(crit, data[:, 1])

    # Case where there is a third column in input ASCII data considered as the absolute flux
    if abs_flux:
        try:
            y = pl.compress(crit, data[:, 2])
        except:
            pass

    if bla:
        print " Number of selected points:  ", len(x).__format__('11g')
        print "  Selected lambda min:       ", min(x).__format__('9.3f')
        print "  Selected lambda max:       ", max(x).__format__('9.3f')
        if hshift != 0.0:
            print " Including a shift of:       ", hshift.__format__('9.3f')

    #Flag to plot spectra
    p = True

    return x, y, p
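
A minimal calling sketch, assuming the module-level flag abs_flux that select_data reads (and the plot flag p that it sets) live in the same module; the spectrum values are made up and given in nm so that the unit-conversion branch is exercised:

import pylab as pl

abs_flux = False                        # module-level flag read inside select_data (assumed)
data = pl.array([[650.0, 1.00],         # col 0: wavelength (nm), col 1: flux
                 [650.1, 0.98],
                 [650.2, 0.97]])
x, y, ok = select_data(0, data, 6500., 6502., ['spec.dat'], ['nm'], [''])
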
Example #8
    
if dumb.shape[1] == 4:
    qm_levels = [Level(e=e, g=g, cfg=cfg, term=term, ref=QMP) for e, g, cfg, term in dumb]
elif dumb.shape[1] == 6:
    qm_levels = [Level(e=e, g=g, cfg=cfg, term=term, p=p, ref=ref) for e, g, cfg, term, p, ref in dumb]
else:
    print("Input format problem.")
    quit(1)  


i, e, x = id_uniq_qm_px(level, qm_levels, idx_table, CUTOFF, DE, px_bin_fn)

e_us, x_us, area_i, area_is = px_smooth(i, e, x, idx_table, N_HE0, N_US, N_US_HE, WC)

# Remove negative cross-sections
ef = pl.compress( x_us > 0., e_us)
xf = pl.compress( x_us > 0., x_us)

plot_px(ef, xf, [area_i,], [area_is,], level, qm_levels, idx_table, ((i,e,x),), N_HE0, MPI)




# def smooth(x,wc):
#    ''' Smooth data x by a third order Butterworth low-band filter characterized by a cut frequency wc [rad/s] 
#    ''' 
#    b,a = ss.butter(3, wc)
#    return ss.filtfilt(b, a, x)
# 
# # Convert photoionization tables of all the levels in binary format
# #format_px(flag, px_org_fn, idx_fn, px_bin_fn)
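
The commented-out smooth() above sketches a third-order Butterworth low-pass filter applied forward and backward. A self-contained version with scipy.signal is shown below; note that ss.butter expects a cutoff normalized to the Nyquist frequency unless analog=True is passed, so the wc value here is illustrative rather than a rate in rad/s:

import numpy as np
import scipy.signal as ss

def smooth(x, wc):
    # zero-phase 3rd-order Butterworth low-pass filter;
    # wc is the cutoff normalized to the Nyquist frequency (0 < wc < 1)
    b, a = ss.butter(3, wc)
    return ss.filtfilt(b, a, x)

noisy = np.sin(np.linspace(0., 10., 500)) + 0.1 * np.random.randn(500)
clean = smooth(noisy, 0.05)
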
Example #10
def select_ll(llfile, lbdmin, lbdmax, lbdrange):
    '''Select lines to show on the plot by vertical lines
    '''

    bla = False

    try:
        llfile = pl.loadtxt(llfile, dtype='str', comments='#', delimiter='\n')
    except IOError:
        print "Linelist file does not exist."
        quit(1)
        #return None, None, None

    elt_ll = [line[0:9] for line in llfile]
    lbd_ll = [float(line[9:18]) for line in llfile]

    try:
        lgf_ll = [float(line[18:]) for line in llfile]
    except:
        lgf_ll = None

    elt_ll = pl.asarray(elt_ll)
    lbd_ll = pl.asarray(lbd_ll)
    lgf_ll = pl.asarray(lgf_ll)

    crit = pl.logical_and(lbd_ll <= lbdmax, lbd_ll >= lbdmin)

    elt_ll_set = pl.compress(crit, elt_ll)
    lbd_ll_set = pl.compress(crit, lbd_ll)

    try:
        lgf_ll_set = pl.compress(crit, lgf_ll)
    except:
        lgf_ll_set = None

    nid = lbd_ll_set.size
    dlbd_ll_set = lbd_ll_set[1:] - lbd_ll_set[0:nid-1]
    dlbd_mean = pl.mean(dlbd_ll_set)

    if bla:
        print ""
        print "  Number of line identification in the selected range:", nid
        print "  Number of identification/Å:", (nid/lbdrange).__format__('7.2f')
        print "  Mean interval [Å]:         ", dlbd_mean.__format__('7.2f')

    loggfmin = -1

    while nid > 100:
        crit = lgf_ll_set > loggfmin
        if not pl.any(crit):
            break

        elt_ll_set = pl.compress(crit, elt_ll_set)
        lbd_ll_set = pl.compress(crit, lbd_ll_set)
        lgf_ll_set = pl.compress(crit, lgf_ll_set)

        nid = lbd_ll_set.size

        dlbd_ll_set = lbd_ll_set[1:] - lbd_ll_set[0:nid-1]
        dlbd_mean = pl.mean(dlbd_ll_set)

        if bla:
            print "Number of line with log gf >", loggfmin, "to display:", nid
            print "Number of identification/Å:", nid/lbdrange
            print "Mean interval [Å]:", dlbd_mean

        loggfmin = loggfmin + 0.2

    return elt_ll_set, lbd_ll_set, dlbd_mean
Example #11
def select_data(i, data, lbdmin, lbdmax, obslist, lbdunitlist, hshiftlist):
    '''Selection of spectrum in a given wavelength range
    '''

    bla = False
    global p

    # Case where wavelengths are not in Angstrom but in nm
    if lbdunitlist[i] in ['a', 'Å', 'A', '', None]:
        pass
    elif lbdunitlist[i] == 'nm':
        data[:, 0] = data[:, 0]*10
    else:
        print "Wavelength unit unknown. Try Å|A|a or nm."
        quit(1)

    if hshiftlist[i] in [None, '']:
        hshift = 0.0
    else:
        hshift = float(hshiftlist[i])

    data[:, 0] = data[:, 0] + hshift

    crit = pl.logical_and(data[:, 0] <= lbdmax, data[:, 0] >= lbdmin)

    if not crit.any():
        # Case of 1 spectrum only without any idea of wavelength unit
        #if len(obslist) == 1:
        data[:, 0] = data[:, 0]*10
        crit = pl.logical_and(data[:, 0] <= lbdmax, data[:, 0] >= lbdmin)
        print "wavelenght of the spectrum are outside the selection range."
        print "Maybe wavelengths are in nm ... convert wavelengths in Å and try again..."
        if not crit.any():
        #      print "Wavelength range outside the spectrum."
        #      quit(1)
        # Skip this spectrum
        #else:
            print "Required (or default) wavelength range outside of this spectrum."
            print " Number of wavelength points:", len(data[:, 0]).__format__('11g')
            print "  Lambda min:                ", min(data[:, 0]/10).__format__('9.3f')
            print "  Lambda max:                ", max(data[:, 0]/10).__format__('9.3f')
            return None, None, p

    if bla:
        print " Number of wavelength points:", len(data[:, 0]).__format__('11g')
        print "  Lambda min:                ", min(data[:, 0]).__format__('9.3f')
        print "  Lambda max:                ", max(data[:, 0]).__format__('9.3f')

    x = pl.compress(crit, data[:, 0])
    y = pl.compress(crit, data[:, 1])

    # Case where there is a third column in input ASCII data considered as the absolute flux
    if abs_flux:
        try:
            y = pl.compress(crit, data[:, 2])
        except:
            pass

    if bla:
        print " Number of selected points:  ", len(x).__format__('11g')
        print "  Selected lambda min:       ", min(x).__format__('9.3f')
        print "  Selected lambda max:       ", max(x).__format__('9.3f')
        if hshift != 0.0:
            print " Including a shift of:       ", hshift.__format__('9.3f')

    #Flag to plot spectra
    p = True

    return x, y, p
Example #12
import pylab as P

def pchip_init(x, y):
    
    """
    Compute the tangents for the piecewise cubic Hermite interpolant with monotonicity preserved
    
        x = array containing the x-data
        y = array containing the y-data
    
        x must be sorted low to high... (no repeats)
        y can have repeated values
    
        x input conditioning is assumed but not checked
    """
    
    n = len(x)

    delta = (y[1:] - y[:-1]) / (x[1:] - x[:-1])                                 # Compute the slopes of the secant lines between successive points

    m = P.zeros(n, dtype='d')                                                   # Initialize the tangent at every point as the average of the secants

    m[0] = delta[0]                                                             # At the endpoints - use one-sided differences
    m[n-1] = delta[-1]

    m[1:-1] = (delta[:-1] + delta[1:]) / 2.0                                    # In the middle - use the average of the secants

    """
    Special case: intervals where y[k] == y[k+1]

    Setting these slopes to zero guarantees the spline connecting
    these points will be flat which preserves monotonicity
    """
    
    indices_to_fix = P.compress((delta == 0.0), range(n))
    
#    print "zero slope indices to fix = ", indices_to_fix

    for ii in indices_to_fix:
        m[ii]   = 0.0
        m[ii+1] = 0.0

    alpha = m[:-1]/delta
    beta  = m[1:]/delta
    dist  = alpha**2 + beta**2
    tau   = 3.0 / P.sqrt(dist)

    """
    To prevent overshoot or undershoot, restrict the position vector
    (alpha, beta) to a circle of radius 3.  If (alpha**2 + beta**2)>9,
    then set m[k] = tau[k]*alpha[k]*delta[k] and m[k+1] = tau[k]*beta[k]*delta[k]
    where tau = 3/sqrt(alpha**2 + beta**2).
    """
    
    over = (dist > 9.0)                                                         # Find the indices that need adjustment
    indices_to_fix = P.compress(over, range(n))

#    print "overshoot indices to fix... = ", indices_to_fix

    for ii in indices_to_fix:
        m[ii]   = tau[ii] * alpha[ii] * delta[ii]
        m[ii+1] = tau[ii] * beta[ii]  * delta[ii]

    return m
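
A short usage sketch (P is pylab, as in the function body): the returned array holds one tangent per knot and would be passed to the matching Hermite evaluation routine.

import pylab as P

x = P.array([0., 1., 2., 3., 4.])
y = P.array([0., 1., 2., 4., 8.])
m = pchip_init(x, y)
print "tangents at the knots:", m
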
Example #13
                    area_list.append(area_i)
                    area_is_list.append(area_is)
                    qm_cfg_list.append(qm_levels[i].cfg)
                    qm_term_list.append(qm_levels[i].term)
                    qm_e_list.append(qm_levels[i].e)
                    qm_g_list.append(qm_levels[i].g)
                else:
                    print("Photoionization not found for one of the (sub)level:", end=" ")
                    level.print()

            if i_list:
                nqmp += 1
                # Possible combination of photoionization for HyperLevel|SuperLevel or fraction for State
                ef, xf = px_combine(i_list, e_list, x_list, level, qm_levels)
                # Remove negative cross-sections
                ef = pl.compress(xf > 0.0, ef)
                xf = pl.compress(xf > 0.0, xf)

                # Compute the wavelength at the threshold and compare with limit LBD_LIM fixed by the user
                e_thres = Cst.H * Cst.C * 1.0e10 / Cst.Q / ef[0]
                x_thres = xf[0] * 1.0e-18
                if e_thres < LBD_LIM:
                    print("Warning: ionization energy corresponds to wavelenght lower than the limit:", LBD_LIM, " Å")

                # Remove cross-sections for energies lower than LBD_LIM Å
                keep = Cst.H * Cst.C * 1.0e10 / Cst.Q / ef >= LBD_LIM
                ef = pl.compress(keep, ef)
                xf = pl.compress(keep, xf)

                # Plot photoionization cross-section for the selected level
                if PLOT:
                    plot_px(ef, xf, area_list, area_is_list, level, qm_levels, idx_table, dumb, N_HE0, MPI)
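
The threshold test and the wavelength cut above both rely on the conversion lambda[Å] = h*c*1e10/(q*E[eV]), i.e. roughly 12398.4/E[eV]. A tiny standalone check with SciPy's physical constants (the sample energies are illustrative):

import numpy as np
from scipy.constants import c, e, h

def ev_to_angstrom(energy_ev):
    # photon wavelength in Angstrom for a photon energy in eV
    return h * c * 1.0e10 / (e * energy_ev)

print(ev_to_angstrom(np.array([1.0, 13.6057])))   # ~12398.4 A and ~911.3 A
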