Code Example #1
File: integrators.py  Project: jakevdp/pyOrbits
def test_integrators(tmax=100,
                     Nsteps=1000,
                     x0=[0.0,1.0],
                     v0=[1.0,0.0]):
    mag = lambda x: numpy.sqrt(numpy.dot(x,x))
    fprime = lambda x: -x/mag(x)**3
    energy = lambda x,v: 0.5*(v**2).sum(1) - 1./numpy.sqrt((x**2).sum(1))

    pylab.figure(1)
    
    for (method,N) in ((Euler,Nsteps),
                       (DriftKick,Nsteps),
                       (LeapFrog,Nsteps),
                       (VelocityVerlet,Nsteps),
                       (RungeKutta,Nsteps//4)):
        t = numpy.linspace(0,tmax,N+1)
        x_t,v_t = method(fprime,x0,v0,tmax,N)
        E = energy(x_t,v_t)

        delta_E = abs((E-E[0])/E[0])
        
        pylab.loglog(t[1:],delta_E[1:],
                     label=method.__name__)
    
    pylab.ylim(1E-8,1E4)
    pylab.legend(loc=2)
    pylab.xlabel(r'$\mathdefault{t}$')
    pylab.ylabel(r'$\mathdefault{|\Delta E/E|}$')
    
    pylab.show()
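The integrators compared above are assumed to share the signature method(fprime, x0, v0, tmax, N) and to return position and velocity histories of shape (N+1, 2), as the energy computation implies. A minimal forward-Euler sketch matching that interface (for illustration only, not the project's implementation):

import numpy

def euler_sketch(fprime, x0, v0, tmax, N):
    # Fixed-step forward Euler over N steps; rows are time samples.
    dt = float(tmax) / N
    x = numpy.zeros((N + 1, len(x0)))
    v = numpy.zeros((N + 1, len(v0)))
    x[0], v[0] = x0, v0
    for i in range(N):
        x[i + 1] = x[i] + dt * v[i]
        v[i + 1] = v[i] + dt * fprime(x[i])
    return x, v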
Code Example #2
File: convergence_study.py  Project: jjakeman/pyheat
def plot_convergence_data_from_file( labels ):
    import pickle
    l2_error = pickle.load( open( 'l2-error.p', 'rb' ) )
    inf_error = pickle.load( open( 'inf-error.p', 'rb' ) )
    num_pts = pickle.load( open( 'num-grid-points.p', 'rb' ) )


    max_error = 0.
    min_error = 1.
    for i in range( len( l2_error ) ):
        pylab.figure( 1 )
        pylab.loglog( num_pts[i], l2_error[i], '-o', label = labels[i] )


        pylab.figure( 2 )
        pylab.loglog( num_pts[i], inf_error[i], '-o', label = labels[i] )

        max_error = max( max_error, numpy.array( l2_error[i] ).max() )
        min_error = min( min_error, numpy.array( l2_error[i] ).min() )

    #matplotlib.rcParams.update({'font.size': 16})
    pylab.figure( 1 )
    pylab.legend()
    pylab.xlabel(r'Number of grid points')
    pylab.ylabel(r'$\lVert f - \hat{f}\rVert_{\ell_2}$')#,fontsize=16)
    pylab.ylim(min_error/5.,5.*max_error)
    pylab.savefig('genz-corner-peak-10d-5e-1c-quartic-decay-l2-convergence.eps',dpi=1200)

    pylab.figure( 2 )
    #pylab.title(r'$\int_{-1}^1 f(x)$', fontsize=10)
    pylab.legend()
    pylab.xlabel(r'Number of grid points')
    pylab.ylabel(r'$\lVert I[f] - Q[f]\rVert_{\ell_2}$')
    pylab.ylim(1e-16,5*max_error)
Code Example #3
File: rank_size.py  Project: dudarev/datavis
def plot_point(rank1, style=None):
    if not style:
        style = "r--"
    # plot lines to point (rank1,pop1)
    pop1 = pop[rank1]
    P.loglog([rank1, rank1], [0.1, pop1], style)
    P.loglog([1, rank1], [pop1, pop1], style)
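plot_point relies on a module-level population sequence pop (indexed by rank) and on pylab imported as P; a minimal usage sketch with hypothetical data:

import pylab as P

pop = {1: 8.4e6, 2: 3.8e6, 3: 2.7e6, 10: 9.5e5}   # hypothetical rank -> population values
ranks = sorted(pop)
P.loglog(ranks, [pop[r] for r in ranks], 'ko')     # rank-size scatter
plot_point(10)                                     # default red dashed guide lines
plot_point(2, style='b:')
P.show()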
Code Example #4
File: distrib.py  Project: astrofanlee/project_TL
def expy(d):
    colors = ['b','g','r','c','m','y','k','b--','g--','r--','c--','m--','y--']
    multi = 2.**N.arange(2,3.1,1.)
    #multi = [77.]
    for i in range(len(multi)):
        dinflate = N.exp((d)*multi[i])-1.
        k,p = power.pk(dinflate)

        var = N.var((d)*multi[i])
        lognocor = ((N.exp(var)-1)*N.exp(var)/var)
        lognovar = (N.exp(var)-1)*N.exp(var)
        print(1+2*var, 'lognocor:', lognocor)
        #varlog = N.var(N.log(dinflate+1.).flatten())
        k,plog = power.pk(N.log(dinflate+1))
        print(p[0]/plog[0], p[-1]/plog[-1])

        #M.subplot(121)
        M.loglog(k,p/plog/lognocor,colors[i])
        #M.subplot(122)

        #M.loglog(k,plog,colors[i])
        #M.loglog([k[0],k[-1]],[lognovar,lognovar],colors[i])
        #M.loglog(k,1./plog**mul,colors[i])
        

    
    #preal = M.load('mill/s63/pm.pnl.dat')
    #plogreal = M.load('mill/s63/plogm.pnl.dat')    
    #M.loglog(preal[:,0],preal[:,1]/plogreal[:,1],'b')

    #M.xlabel(r'$k\ [\rm{Mpc}/h]$',fontsize=20)
    #M.ylabel(r'$P_\delta(k)/P_{\log (1+\delta)}(k)$',fontsize=20)

    M.show()
Code Example #5
File: wisecheck.py  Project: bpartridge/tractor
def psfplots():
	tpsf = wise.get_psf_model(1, pixpsf=True)
	
	psfp = tpsf.getPointSourcePatch(0, 0)
	psf = psfp.patch
	
	psf /= psf.sum()
	
	plt.clf()
	plt.imshow(np.log10(np.maximum(1e-5, psf)), interpolation='nearest', origin='lower')
	plt.colorbar()
	ps.savefig()
	
	h,w = psf.shape
	cx,cy = w/2, h/2
	
	X,Y = np.meshgrid(np.arange(w), np.arange(h))
	R = np.sqrt((X - cx)**2 + (Y - cy)**2)
	plt.clf()
	plt.semilogy(R.ravel(), psf.ravel(), 'b.')
	plt.xlabel('Radius (pixels)')
	plt.ylabel('PSF value')
	plt.ylim(1e-8, 1.)
	ps.savefig()
	
	plt.clf()
	plt.loglog(R.ravel(), psf.ravel(), 'b.')
	plt.xlabel('Radius (pixels)')
	plt.ylabel('PSF value')
	plt.ylim(1e-8, 1.)
	ps.savefig()
	
	print('PSF norm:', np.sqrt(np.sum(np.maximum(0, psf)**2)))
	print('PSF max:', psf.max())
Code Example #6
File: distrib.py  Project: astrofanlee/project_TL
def showpplog(prefix,color,fact=1.,xis=1.,xislog=1.,sumto=10,camb=0,cellsize=0):
    p = M.load(prefix+'/pm.pnl.dat')
    plog = M.load(prefix+'/plogm.pnl.dat')

    # all times xis
    sump = N.sum(M.load(prefix+'c11')[:sumto,1]*p[:sumto,2])
    c21xis = N.sum(M.load(prefix+'c21')[:sumto,1]*p[:sumto,2])/sump
    c22xis = N.sum(M.load(prefix+'c22')[:sumto,1]*p[:sumto,2])/sump
    c31xis = N.sum(M.load(prefix+'c31')[:sumto,1]*p[:sumto,2])/sump
    
    #bias = N.sum(p[:sumto,1]*p[:sumto,2])/N.sum(plog[:sumto,1]*plog[:sumto,2])
    bias = N.sum((p[:sumto,1]/plog[:sumto,1]) * p[:sumto,2])/N.sum(p[:sumto,2])
    biaserror = N.std(p[:sumto,1]/plog[:sumto,1])
    simpleapprox = 1./(1.-0.44*xis)

    c21 = camb.c21(cellsize)
    c22 = camb.c22(cellsize)
    c31 = camb.c31(cellsize)
    s3 = camb.s3(cellsize)
    #print cellsize,c21, c21/xis
    approx = 1./(1+xis*(2.-c21))
    approx2 = 1./(1+xis*(2-c21)+xis**2*(7-2*s3-4*c21 + 2.*c31/3. + c22/4.))
    #print bias,simpleapprox,approx,approx2
    print(bias, biaserror)

    M.loglog([cellsize],[simpleapprox-1],'yo')
    M.loglog([cellsize],[approx-1],'rp')
    M.loglog([cellsize],[approx2-1.],'bh')
    M.loglog([cellsize],[fact-1.],'gD')
    M.loglog([cellsize],[bias-1],'k.')
Code Example #7
 def PlotDtFit(self, style="k:"):
     pylab.loglog(
         self.dt_values,
         self.scale_factor * self.dt_values ** self.exponent,
         style,
         label="$\sim \Delta t^{%.3f}$" % self.exponent,
     )
Code Example #8
def plotlengths(elements,energies=''):
    '''
    Plot attenuation lengths versus Energy
    '''
    import pylab

    densities = pickle.load(open('densities.dat','rb'))

    for element in elements:
        print('Plotting', element)
        try:
            rho = densities[element]
        except KeyError:
            print('Unknown element, ', element)
        else:
            [E,massatt] = getdata(element,Energies=energies)
            
            attlength = len(massatt) * [0]

            for i in range(len(massatt)):
                attlength[i] = 1.0 / (massatt[i] * rho)

            pylab.loglog(E,attlength,label=element)

    pylab.xlabel('Energy (MeV)')
    pylab.ylabel('Attenuation Length (cm)')
    pylab.legend(loc='lower right')
    pylab.grid()
    pylab.show()
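The per-element loop above can be collapsed into a single NumPy expression; an equivalent sketch (same variables, inside the same else branch):

import numpy as np
attlength = 1.0 / (np.asarray(massatt, dtype=float) * rho)   # attenuation length in cm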
Code Example #9
File: extrapolate.py  Project: BijanZarif/CBC.Solve
def extrapolate(n, y, tolerance=1e-15, plot=False, call_show=True):
    "Extrapolate functional value Y from sequence of values (n, y)."

    # Make sure we have NumPy arrays
    n = array(n)
    y = array(y)

    # Create initial "bound"
    Y0 = 0.99*y[-1]
    Y1 = 1.01*y[-1]

    # Compute initial interior points
    phi = (sqrt(5.0) + 1.0) / 2.0
    Y2 = Y1 - (Y1 - Y0) / phi
    Y3 = Y0 + (Y1 - Y0) / phi

    # Compute initial values
    F0, e, nn, ee, yy = _eval(n, y, Y0)
    F1, e, nn, ee, yy = _eval(n, y, Y1)
    F2, e, nn, ee, yy = _eval(n, y, Y2)
    F3, e, nn, ee, yy = _eval(n, y, Y3)

    # Solve using direct search (golden ratio fraction)
    while Y1 - Y0 > tolerance:

        if F2 < F3:
            Y1, F1 = Y3, F3
            Y3, F3 = Y2, F2
            Y2 = Y1 - (Y1 - Y0) / phi
            F2, e, nn, ee, yy = _eval(n, y, Y2)
        else:
            Y0, F0 = Y2, F2
            Y2, F2 = Y3, F3
            Y3 = Y0 + (Y1 - Y0) / phi
            F3, e, nn, ee, yy = _eval(n, y, Y3)

        print(Y0, Y1)

    # Compute reference value
    Y = 0.5*(Y0 + Y1)

    # Print results
    print()
    print("Reference value:", Y)

    # Plot result
    if plot:
        pylab.figure()
        pylab.subplot(2, 1, 1)
        pylab.title("Reference value: %g" % Y)
        pylab.semilogx(n, y, 'b-o')
        pylab.semilogx(nn, yy, 'g--')
        pylab.subplot(2, 1, 2)
        pylab.loglog(n, e, 'b-o')
        pylab.loglog(nn, ee, 'g--')
        pylab.grid(True)
        if call_show:
            pylab.show()

    return Y
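A hypothetical call, assuming the module-level helpers this routine relies on (_eval, array, sqrt, pylab) are in scope; the sequence's last value should already lie within the 1% initial bracket of the limit:

n = [8, 16, 32, 64]                            # hypothetical mesh resolutions
y = [1.0400, 1.0100, 1.0025, 1.000625]         # hypothetical functional values converging to 1.0
Y = extrapolate(n, y, plot=False)              # golden-section search for the limit value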
Code Example #10
def plotBode(fg, lowerFreq, higherFreq=None):
    """Plot a Bode diagram using matplotlib.
    Default frequency width is 3 decades.
    """
    import pylab as py
    #import pdb; pdb.set_trace()
    if higherFreq is None:
        rangeAt = 1000.0  # 3 decades
    else:
        assert higherFreq > lowerFreq
        rangeAt = float(higherFreq)/lowerFreq

    N = 128
    lstScannAngFreqAt = [2*sc.pi*1j*lowerFreq*sc.exp(
                                                sc.log(rangeAt)*float(k)/N)
                                for k in range(N)]

    t              = [        lowerFreq*sc.exp(sc.log(rangeAt)*float(k)/N)
                                for k in range(N)]

    py.subplot(211)
    py.loglog( t, [abs(fg(x)) for x in lstScannAngFreqAt] )
    py.ylabel('gain')
    py.grid(True)

    py.subplot(212)
    py.semilogx( t, [sc.arctan2(fg(zz).imag, fg(zz).real)
                        for zz in lstScannAngFreqAt])
    py.ylabel('phase')
    py.grid(True)
    py.gca().xaxis.grid(True, which='minor')  # minor grid on too
    py.show()
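plotBode expects a callable mapping the complex angular frequency s = 2*pi*j*f to a complex gain, and it assumes the module's sc alias (NumPy/SciPy) is in scope; a usage sketch with a hypothetical first-order low-pass transfer function:

tau = 1e-3                                 # hypothetical time constant in seconds
lowpass = lambda s: 1.0 / (1.0 + tau * s)  # first-order low-pass H(s)
plotBode(lowpass, 10.0)                    # sweep 3 decades starting at 10 Hz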
Code Example #11
File: quick_plot.py  Project: imrehg/labhardware
def plotData(filename):
    gatetime, allanvar = np.loadtxt(filename, comments="#", delimiter=",", unpack=True)
    pl.loglog(gatetime, allanvar, '.-')
    pl.xlabel("Gate time (s)")
    pl.ylabel("Allan deviation (Hz)")
    pl.xlim([min(gatetime), max(gatetime)])
    pl.show()
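plotData expects a two-column, comma-separated file (lines starting with # are treated as comments): gate time in seconds, then Allan deviation in Hz. A hypothetical input file might contain

0.1,2.5e-1
1.0,8.0e-2
10.0,2.6e-2

and would be plotted with plotData('allan_dev.csv') (hypothetical filename).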
Code Example #12
File: plfit.py  Project: robypoteau/tdproject
    def plotcdf(self, x=None, xmin=None, alpha=None, pointcolor='k',
            pointmarker='+', **kwargs):
        """
        Plots CDF and powerlaw
        """
        if x is None: x=self.data
        if xmin is None: xmin=self._xmin
        if alpha is None: alpha=self._alpha

        x=numpy.sort(x)
        n=len(x)
        xcdf = numpy.arange(n,0,-1,dtype='float')/float(n)

        q = x[x>=xmin]
        fcdf = (q/xmin)**(1-alpha)
        nc = xcdf[argmax(x>=xmin)]
        fcdf_norm = nc*fcdf

        D_location = argmax(xcdf[x>=xmin]-fcdf_norm)
        pylab.vlines(q[D_location],xcdf[x>=xmin][D_location],fcdf_norm[D_location],color='m',linewidth=2)

        #plotx = pylab.linspace(q.min(),q.max(),1000)
        #ploty = (plotx/xmin)**(1-alpha) * nc

        pylab.loglog(x,xcdf,marker=pointmarker,color=pointcolor,**kwargs)
        #pylab.loglog(plotx,ploty,'r',**kwargs)
        pylab.loglog(q,fcdf_norm,'r',**kwargs)
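For reference, the red model curve is the power-law complementary CDF implied by the fit, P(X >= x) = (x/xmin)**(1 - alpha), rescaled by nc so that it meets the empirical CCDF at xmin.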
Code Example #13
File: plfit.py  Project: robypoteau/tdproject
    def plotppf(self,x=None,xmin=None,alpha=None,dolog=True,**kwargs):
        """
        Plots the power-law-predicted value on the Y-axis against the real
        values along the X-axis.  Can be used as a diagnostic of the fit 
        quality.
        """
        if not(xmin): xmin=self._xmin
        if not(alpha): alpha=self._alpha
        if not(x): x=numpy.sort(self.data[self.data>xmin])
        else: x=numpy.sort(x[x>xmin])

        # N = M^(-alpha+1)
        # M = N^(1/(-alpha+1))
        
        m0 = min(x)
        N = (1.0+numpy.arange(len(x)))[::-1]
        xmodel = m0 * N**(1/(1-alpha)) / max(N)**(1/(1-alpha))
        
        if dolog:
            pylab.loglog(x,xmodel,'.',**kwargs)
            pylab.gca().set_xlim(min(x),max(x))
            pylab.gca().set_ylim(min(x),max(x))
        else:
            pylab.plot(x,xmodel,'.',**kwargs)
        pylab.plot([min(x),max(x)],[min(x),max(x)],'k--')
        pylab.xlabel("Real Value")
        pylab.ylabel("Power-Law Model Value")
Code Example #14
File: kmeans.py  Project: abonaca/Platypus
def plot_cluster_context(sizes, densities, dir, name=None, k=None, suffix="png"):
    """
    so many conditionals!
    """
    print("plot_cluster_context(): plotting", name)
    if name is None:
        K = len(sizes)
        fn = "{}/clusters_{:04d}.{}".format(dir, K, suffix)
    else:
        fn = "{}/{}_context.{}".format(dir, name, suffix)
    if os.path.exists(fn):
        print("plot_cluster_context(): {} exists already".format(fn))
        return
    if k is None:
        fig = plt.figure(figsize=(6,6))
        plt.subplots_adjust(left=0.15, right=0.97, bottom=0.15, top=0.97)
        ms = 7.5
    else:
        fig = plt.figure(figsize=(4,4))
        plt.subplots_adjust(left=0.2, right=0.96, bottom=0.2, top=0.96)
        ms = 5.0
    plt.clf()
    if name is not None and k is None:
        plt.savefig(fn)
        print("plot_cluster_context(): wrote", fn)
        return
    _clusterplot(sizes, densities, k, ms=ms)
    _clusterlims(sizes, densities)
    plt.ylabel("cluster abundance-space density")
    plt.xlabel("number in abundance-space cluster")
    plt.loglog()
    [l.set_rotation(45) for l in plt.gca().get_xticklabels()]
    [l.set_rotation(45) for l in plt.gca().get_yticklabels()]
    plt.savefig(fn)
    print("plot_cluster_context(): wrote", fn)
Code Example #15
def snr_mat_f(mchvec, reds, lum_dist, fmin, fmax, fvec, finteg, tobs, sn_f):
	''''''
	mch_fmat=np.transpose(np.tile(mchvec, (len(reds), len(fvec), finteg, 1) ), axes=(0,3,1,2))
	z_fmat=np.transpose(np.tile(reds, (len(mchvec), len(fvec), finteg, 1) ),axes=(3,0,1,2))
	f_fmat=np.transpose(np.tile(fvec, (len(reds), len(mchvec), finteg, 1) ), axes=(0,1,3,2))
	finteg_fmat=np.transpose(np.tile(np.arange(finteg), (len(reds), len(mchvec), len(fvec), 1) ), axes=(0,1,2,3))
	stshape=np.shape(z_fmat) #Standard shape of all matrices that I will use.
	DL_fmat=np.transpose(np.tile(lum_dist, (len(mchvec), len(fvec), finteg, 1) ),axes=(3,0,1,2)) #Luminosity distance in Mpc.
	flim_fmat=A8.f_cut(1./4., 2.*mch_fmat*2.**(1./5.))*1./(1.+z_fmat) #The symmetric mass ratio is 1/4, since I assume equal masses.
	flim_det=np.maximum(np.minimum(fmax, flim_fmat), fmin) #The isco frequency limited to the detector window.
	tlim_fmat=CM.tafter(mch_fmat, f_fmat, flim_fmat, z_fmat)
	#By construction, f_mat cannot be smaller than fmin or larger than fmax (which are the limits imposed by the detector).
	fmin_fmat=np.minimum(f_fmat, flim_det) #I impose that the minimum frequency cannot be larger than the fisco.
	fmaxobs_fmat=flim_det.copy()
	#fmaxobs_fmat=fmin_fmat.copy()
	fmaxobs_fmat[tobs<tlim_fmat]=CM.fafter(mch_fmat[tobs<tlim_fmat], z_fmat[tobs<tlim_fmat], f_fmat[tobs<tlim_fmat], tobs)
	fmax_fmat=np.minimum(fmaxobs_fmat, flim_det) #The maximum frequency (after an observation tobs) cannot exceed fisco or the maximum frequency of the detector.
	integconst=(np.log10(fmax_fmat)-np.log10(fmin_fmat))*1./(finteg-1)
	finteg_fmat=fmin_fmat*10**(integconst*finteg_fmat)
	sn_vec=sn_f(fvec)##########
	sn_fmat=sn_f(finteg_fmat) #Noise spectral density.
	#htilde_fmat=A8.htilde_f(1./4., 2.*mch_fmat*2**(1./5.), z_fmat, DL_fmat, f_fmat)
	htilde_fmat=A8.htilde_f(1./4., 2.*mch_fmat*2**(1./5.), z_fmat, DL_fmat, finteg_fmat)
	py.loglog(finteg_fmat[0,0,:,0],htilde_fmat[0,0,:,0]**2.)
	py.loglog(finteg_fmat[0,0,:,0],sn_fmat[0,0,:,0])
	snrsq_int_fmat=4.*htilde_fmat**2./sn_fmat #Integrand of the S/N square.
	snrsq_int_m_fmat=0.5*(snrsq_int_fmat[:,:,:,1:]+snrsq_int_fmat[:,:,:,:-1]) #Integrand at the arithmetic mean of the infinitesimal intervals.
	df_fmat=np.diff(finteg_fmat, axis=3) #Infinitesimal intervals.
	snr_full_fmat=np.sqrt(np.sum(snrsq_int_m_fmat*df_fmat,axis=3)) #S/N as a function of redshift, mass and frequency.
	fopt=fvec[np.argmax(snr_full_fmat, axis=2)] #Frequency at which the S/N is maximum, for each pixel of redshift and mass.
	snr_opt=np.amax(snr_full_fmat, axis=2) #Maximum S/N at each pixel of redshift and mass.
	snr_min=snr_full_fmat[:,:,0]
	return snr_opt
Code Example #16
def plot_gain_drift(time_len=128, chan=1):
    """ Have a look at the gain drift """    
    nar.load_data() if plot_old else nar.take_data(time_len)

    time_series_on = nar.ts_x_on[:, chan]
    spec_series_on = np.abs(np.fft.fft(time_series_on))

    time_series_off = nar.ts_x_off[:, chan]
    spec_series_off = np.abs(np.fft.fft(time_series_off))
    
    #t  = np.cumsum(np.ones([time_len]))
    #tt = t/ np.max(t) * total_time
    #tu = np.cumsum(np.ones([time_len]))/total_time
    
    #print spec_series_off.shape
    #print tu.shape
    
    plt.subplot(211)
    plt.plot(time_series_on, label="X-on", c=c[1])
    #plt.plot(tt, 10*np.log10(time_series_off)), label="X-off", c=c[2])
    plt.xlabel("Time")
    plt.legend()
    plt.subplot(212)
    
    plt.loglog(spec_series_on, c=c[1])
    #plt.loglog(tu, spec_series_off, c=c[2])
    plt.xlabel("Spectrum")
    plt.show()
Code Example #17
def plot_calibrated(time_len=128, chan=1):
    """ Plot calibrated as a function of time """
    nar.load_data() if plot_old else nar.take_data(time_len)
        
    cal   = (nar.ts_x_on + nar.ts_x_off) * Td / (nar.ts_x_on/nar.ts_x_off - 1)
    p_tot = (nar.ts_x_on + nar.ts_x_off)
    time_series  = cal[:, chan]
    p_avg        = np.average(p_tot[:, chan])
    time_series  = time_series / p_avg
    spec_series = np.abs(np.fft.rfft(time_series))
    
    #uncal = nar.ts_x_on
    #uncal = uncal[:, chan]
    #spec_uncal = np.abs(np.fft.rfft(uncal))
    
    t = np.cumsum(np.ones([time_len]))
    t = t /np.max(t) * total_time
    tu = np.cumsum(np.ones([time_len/2+1]))/total_time
    
    plt.subplot(211)
    #plt.plot(t, uncal, c=c[1])
    plt.plot(t, time_series, c=c[0])
    plt.xlabel("Time (s)")
    plt.ylabel("Calibrated signal (K)")
    plt.subplot(212)
    #plt.loglog(tu, spec_uncal, c=c[1])
    plt.loglog(tu, spec_series, c=c[0])

    plt.xlabel("Spectrum (Hz)")
    plt.ylabel("")
    plt.show()
Code Example #18
File: utils.py  Project: ianmtaylor1/pacal
def estimateAtInfExponent(f, x, pos = True, fromTo = None, N = 10, deriv = False, debug_plot = False):
    if fromTo is None:
        fromTo = (1,10)
    ex = logspace(fromTo[0], fromTo[1], N)
    if pos:
        lx = ex
    else:
        lx = -ex
    y = abs(f(lx))
    #if deriv:
    #    y -= min(y[isfinite(y)])
    yi = log(y)
    xi = log(abs(ex))
    ind = isfinite(yi)
    xi = xi[ind]
    yi = yi[ind]
    ri = yi[0:-1] - yi[1:]
    di = abs(xi[1:]-xi[0:-1])
    if debug_plot:
        print(xi,yi, f(xi))
        loglog(xi,yi)
    if len(yi) > 1:
        return ri[-1]/di[-1]
    else:
        return 0
Code Example #19
def MisorientationScalingCollapseCompareInset(misses, bdlengths, labels, alpha=2.5):
    """ good values for alpha seem to be 4, but 2.5 for experiment """
    colors = ['b', 'r', 'g', 'y']

    pl.rcParams.update({'legend.fontsize': 14,
            'legend.columnspacing':1.2,
        })
    for i, (mis, label, bdlength) in enumerate(zip(misses, labels, bdlengths)):
        t = mis*bdlength/(mis*bdlength).mean()
        dx = 5./100.
        y,x = np.histogram(t, bins=np.linspace(0, 5, 100))
        x = (x[:-1]+x[1:])/2
        y = y.astype('float')/y.sum() / dx
        pl.plot(x, y, colors[i]+'o--', label=label)

        alpha = fit_alpha(x, y)
        xt = np.linspace(0,5, 1000)
        pl.plot(xt, scaling(alpha, xt), colors[i]+'-', label=r"Fit, $\alpha$ = %0.1f" % alpha)

    pl.xlabel(r"$\theta / \theta_{av}$")
    pl.ylabel(r"$\theta_{av}\,P(\theta, \theta_{av})$")
    pl.legend(loc='lower right')

    ax = pl.axes([0.52, 0.52, 0.35, 0.35])
    for i, (mis, label, bdlength) in enumerate(zip(misses, labels, bdlengths)):
        t = mis*bdlength/(mis*bdlength).mean()
        dx = 5./100.
        y,x = np.histogram(t, bins=np.linspace(0, 5, 100))
        x = (x[:-1]+x[1:])/2
        y = y.astype('float')/y.sum() / dx
        pl.plot(x, y, colors[i]+'o-', label=label)
    pl.loglog()

    return x, y
Code Example #20
File: pyzmq_throughput.py  Project: minrk/script-dump
def plot_lines(data, n=None):
    import pylab
    if n is None:
        n = data['nmsgs'][0]
    data = extract(data, nmsgs=n)
    assert len(data) > 0, 'no data, probably invalid n=%s'%n
    pylab.figure()
    pylab.plot([],[],'k:',label='sent')
    pylab.xlabel('bytes/msg')
    pylab.ylabel('msgs/sec')
    for copy in range(2):
        if copy:
            label = 'copy'
        else:
            label = 'nocopy'
        for track in range(2):
            if track and copy:
                continue
            track_s = ''
            if track:
                label += '+track'
            A = extract(data, copy=copy, track=track)
            x = A['size']
            lines = pylab.loglog(A['size'], 1.*n/A['wall'], label=label)
            c = lines[0].get_color()
            pylab.loglog(A['size'], 1.*n/A['sent'], ':'+c)
    pylab.legend(loc=0)
    pylab.grid(True)
    
    
Code Example #21
File: utils.py  Project: ianmtaylor1/pacal
def estimateDegreeOfPole(f, x, pos = True, fromTo = None, N = 10, deriv = False, debug_plot = False):
    if fromTo is None:
        if x == 0:
            fromTo = (-1,-10)
        else:
            # testing around nonzero singularities is less accurate
            fromTo = (-1,-7)
    ex = logspace(fromTo[0], fromTo[1], N)
    if pos:
        lx = x + ex
    else:
        lx = x - ex
    y = abs(f(lx))
    #if deriv:
    #    y -= min(y[isfinite(y)])
    yi = log(y)
    xi = log(abs(ex))
    ind = isfinite(yi)
    xi = xi[ind]
    yi = yi[ind]
    ri = yi[0:-1] - yi[1:]
    di = abs(xi[1:]-xi[0:-1])
    if debug_plot:
        print(xi,yi, f(xi))
        loglog(xi,yi)
    if len(yi) > 1:
        return ri[-1]/di[-1]
    else:
        return 0
Code Example #22
def plot_session_figs(in_file):
    """Plot some graphs on the generated session file:
    - thp vs duration
    - cdf of duration
    """
    sessions = np.load(in_file)
    # duration vs min thp
    pylab.clf()
    pylab.plot(sessions[:, 3], sessions[:, 1], 'k, ')
    pylab.loglog()
    axes = pylab.gca()
    pylab.grid()
    axes = pylab.gca()
    for tick in axes.xaxis.get_major_ticks():
        tick.label1.set_fontsize(16)
    pylab.xlabel("Minimum throughput in kbps", size=16)
    for tick in axes.yaxis.get_major_ticks():
        tick.label1.set_fontsize(16)
    pylab.ylabel("Session duration per client (sec)", size=16)
    pylab.savefig('%s_session_duration_vs_min_thp.pdf' % in_file)
    # nb flows vs avg thp
    pylab.clf()
    pylab.plot(sessions[:, 4], sessions[:, 2], 'k, ')
    pylab.loglog()
    pylab.grid()
#    axes = pylab.gca()
    pylab.xlabel("Average throughput in kbps", size=16)
    pylab.ylabel("Nb of flows", size=16)
    pylab.savefig('%s_session_nb_fl_vs_avg_thp.pdf' % in_file)
    pylab.clf()
    import cdfplot
    cdfplot.cdfplotdata(sessions[:, 1], _xlabel='Duration in seconds',
            _title='Session durations', _fs_legend='x-large')
Code Example #23
File: perfdoc.py  Project: 2bbb/compute
def plot_to_file(report, filename):
    global fignum
    fignum += 1
    pylab.figure(fignum)

    run_to_label = {
        "stl" : "C++ STL",
        "thrust" : "Thrust",
        "compute" : "Boost.Compute",
        "bolt" : "Bolt"
    }

    for run in sorted(report.samples.keys()):
        x = []
        y = []

        for sample in report.samples[run]:
            x.append(sample[0])
            y.append(sample[1])

        pylab.loglog(x, y, marker='o', label=run_to_label[run])

    pylab.xlabel("Size")
    pylab.ylabel("Time (ms)")
    pylab.legend(loc='upper left')
    pylab.savefig(filename)
Code Example #24
def plotDependencyComponents():

    """Plot thoretical dependency between n_samples and n_components"""

    # range of admissible distortions
    eps_range = np.linspace(0.1, 0.99, 5)
    colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))

    # range of number of samples to embed
    n_samples_range = np.logspace(1, 9, 9)

    
    pl.figure()

    for eps, color in zip(eps_range, colors):
        min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, \
                                                         eps=eps)
        pl.loglog(n_samples_range, min_n_components, color=color)

    pl.legend(["eps = %.1f" % eps for eps in eps_range], \
              loc="lower right")

    pl.xlabel("Number of observations to eps-embed")
    pl.ylabel("Minimum number of dimensions")
    pl.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
    pl.show()
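This snippet assumes the example's usual module-level imports, roughly:

import numpy as np
import pylab as pl
from sklearn.random_projection import johnson_lindenstrauss_min_dim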
Code Example #25
File: reddening.py  Project: dhomeier/PopStar
    def romanzuniga07(wavelength, AKs, makePlot=False):
        filters = ['J', 'H', 'Ks', '[3.6]', '[4.5]', '[5.8]', '[8.0]']
        wave =      np.array([1.240, 1.664, 2.164, 3.545, 4.442, 5.675, 7.760])
        A_AKs =     np.array([2.299, 1.550, 1.000, 0.618, 0.525, 0.462, 0.455])
        A_AKs_err = np.array([0.530, 0.080, 0.000, 0.077, 0.063, 0.055, 0.059])
        
        # Interpolate over the curve
        spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)

        A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)
        A_at_wave = AKs * A_AKs_at_wave

        if makePlot:
            py.clf()
            py.errorbar(wave, A_AKs, yerr=A_AKs_err, fmt='bo', 
                        markerfacecolor='none', markeredgecolor='blue',
                        markeredgewidth=2)

            # Make an interpolated curve.
            wavePlot = np.arange(wave.min(), wave.max(), 0.1)
            extPlot = interpolate.splev(wavePlot, spline_interp)
            py.loglog(wavePlot, extPlot, 'k-')

            # Plot a marker for the computed value.
            py.plot(wavelength, A_AKs_at_wave, 'rs',
                    markerfacecolor='none', markeredgecolor='red',
                    markeredgewidth=2)
            py.xlabel('Wavelength (microns)')
            py.ylabel('Extinction (magnitudes)')
            py.title('Roman Zuniga et al. 2007')


        return A_at_wave
Code Example #26
File: synthetic.py  Project: jluastro/JLU-python-code
def nishiyama09(wavelength, AKs, makePlot=False):
    # Data pulled from Nishiyama et al. 2009, Table 1

    filters = ['V', 'J', 'H', 'Ks', '[3.6]', '[4.5]', '[5.8]', '[8.0]']
    wave =      np.array([0.551, 1.25, 1.63, 2.14, 3.545, 4.442, 5.675, 7.760])
    A_AKs =     np.array([16.13, 3.02, 1.73, 1.00, 0.500, 0.390, 0.360, 0.430])
    A_AKs_err = np.array([0.04,  0.04, 0.03, 0.00, 0.010, 0.010, 0.010, 0.010])

    # Interpolate over the curve
    spline_interp = interpolate.splrep(wave, A_AKs, k=3, s=0)

    A_AKs_at_wave = interpolate.splev(wavelength, spline_interp)
    A_at_wave = AKs * A_AKs_at_wave

    if makePlot:
        py.clf()
        py.errorbar(wave, A_AKs, yerr=A_AKs_err, fmt='bo', 
                    markerfacecolor='none', markeredgecolor='blue',
                    markeredgewidth=2)
        
        # Make an interpolated curve.
        wavePlot = np.arange(wave.min(), wave.max(), 0.1)
        extPlot = interpolate.splev(wavePlot, spline_interp)
        py.loglog(wavePlot, extPlot, 'k-')

        # Plot a marker for the computed value.
        py.plot(wavelength, A_AKs_at_wave, 'rs',
                markerfacecolor='none', markeredgecolor='red',
                markeredgewidth=2)
        py.xlabel('Wavelength (microns)')
        py.ylabel('Extinction (magnitudes)')
        py.title('Nishiyama et al. 2009')

    
    return A_at_wave
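Since splrep with s=0 interpolates the tabulated A_lambda/A_Ks values exactly at the knots, a quick sanity check with a hypothetical AKs:

A_J  = nishiyama09(1.25, 0.5)    # J-band knot: 0.5 * 3.02  = 1.51 mag
A_36 = nishiyama09(3.545, 0.5)   # [3.6] knot:  0.5 * 0.500 = 0.25 mag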
Code Example #27
File: gosl.py  Project: PatrickSchm/gosl
def ConvIndicator(X, Y, pct=0.1, fs=14, eqaxis=False):
    """
    Convergence indicator icon
    ==========================
    """
    if len(X)<2: raise Exception('at least 2 points are required')
    xx, yy   = log10(X), log10(Y)
    p        = polyfit(xx, yy, 1)
    m        = round(p[0])
    xx0, xx1 = min(xx), max(xx)
    yy0, yy1 = min(yy), max(yy)
    dxx, dyy = xx1-xx0, yy1-yy0
    xxm, yym = (xx0+xx1)/2.0, (yy0+yy1)/2.0
    xxl, xxr = xxm-pct*dxx, xxm+pct*dxx
    shift    = 0.5*pct*dxx*m
    xm,  ym  = 10.0**xxm, 10.0**(yym-shift)
    xl,  xr  = 10.0**xxl, 10.0**xxr
    yl,  yr  = 10.0**(yym+m*(xxl-xxm)-shift),10.0**(yym+m*(xxr-xxm)-shift)
    loglog(X, Y)
    #plot(xm, ym, 'ro')
    #plot(xl, yl, 'go')
    #plot(xr, yr, 'mo')
    points = array([[xl,yl],[xr,yl],[xr,yr]])
    gca().add_patch(Polygon(points, ec='k', fc='None'))
    xxR = xxm+1.2*pct*dxx
    xR  = 10.0**xxR
    text(xR, ym, '%g'%m, ha='left', va='center', fontsize=fs)
    if eqaxis: axis('equal')
    return m
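A usage sketch, assuming the pylab-style names this module pulls in (log10, polyfit, loglog, Polygon, text, ...) are in scope; for errors decaying quadratically the rounded slope returned and annotated is -2:

X = [10, 20, 40, 80]            # hypothetical mesh sizes
Y = [1.0 / x**2 for x in X]     # hypothetical errors ~ X**-2
m = ConvIndicator(X, Y)         # draws the slope triangle, returns m = -2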
Code Example #28
def Run(inputfile,input,shape,fieldtype,corrfunctype,symmetrytype,logbinning,binnum,outputfile,plot):
    """
    Performs RadialCorrelationFunctions on an inputfile that holds field data
    in the space separated values of arbitrary arrangement (i.e. rows / columns don't matter).
    
    Input is either by filename or by numpy.array.  If input is specified it is taken to be the 
    data array.  If not, the inputfile is opened and read for data.

    data structure: array.shape should be like (dimension, dimension[0], dimension[1], ...).
        examples:
            * one dimensional 3-component vector field with 256 values - (3, 256)
            * three dimensional 3-component vector field - (3, 256, 256, 256)
            * 3x3 3d tensor field - (3, 3, 256, 256, 256)
    """
    if input is None:
        import Data_IO
        data = Data_IO.ReadInScalarField(inputfile,shape) 
    else:
        data = input
    corr_func = RadialCorrelationFunctions(data,fieldtype,corrfunctype,symmetrytype,logbinning,binnum,fromState=False)
    if outputfile is None:
        outputfile = inputfile+'_correlationfunction.dat'
    Data_IO.OutputXY(corr_func[0],corr_func[1],outputfile)
    if plot:
        import pylab
        pylab.figure()
        pylab.loglog(corr_func[0],corr_func[1],'.-')
        pylab.xlabel(r'$C(r)$',fontsize=20)
        pylab.ylabel(r'$r$',fontsize=20)
        pylab.show()
Code Example #29
File: sqp_plot_filter.py  Project: nlw0/corisco
def main():
    plt.ion()

    fil = FletcherFilter()
    Niter = 12
    logp = plt.zeros((Niter,2))
    for k in range(Niter):
        while True:
            #print k
            p = plt.rand(2)
            if not fil.dominated(p):
                break
        logp[k] = p
        fil.add(p, 0.0, 0.0)
        ff = fil.values[fil.valid]
        ff = plt.r_[[[1e-6,1]], ff[plt.argsort(ff[:,0])], [[1,1e-6]]]
        ww = plt.zeros((ff.shape[0] * 2 - 1, 2))
        ww[::2] = ff
        ww[1::2,0] = ff[1:,0]
        ww[1::2,1] = ff[:-1,1]
        plt.loglog(ww[:,0], ww[:,1], '-')
    plt.loglog(logp[:,0], logp[:,1], 'ys-', lw=2)
    plt.axis([0,1,0,1])
    plt.axis('equal')
    plt.grid()
        
    code.interact()
Code Example #30
File: distrib.py  Project: astrofanlee/project_TL
def logskew(d,floors = [],col='',divvypk=[]):
    maxd = max(d.flatten())
    if len(floors) == 0:
        floors = 1./maxd * 2.**N.arange(-5,5.01,1.)
    print(floors)

    for fl in floors:
        dcopy = 1.*d
        wltf = N.where(dcopy < fl)
        dcopy[wltf] = 0.*dcopy[wltf] + fl

        logrho = N.log(dcopy)
        var  = N.var(decreaseres(logrho,f=16).flatten())
        #skew = SS.skew(logrho.flatten())
        #print fl,'log:',var,'lin:',N.var(dcopy.flatten())
        print(fl, 'log:', var)  #,SS.skew(dcopy.flatten())

        k,pk = power.pk(logrho)

        if (len(divvypk) > 0):
            M.loglog(k,pk/divvypk/var,col)

    if (len(divvypk) == 0):
        return pk,pk/var
    else:
        return
Code Example #31
 def PlotAttenuation(self):
     pl.loglog(self.energy,self.mu,label=self.label)
     pl.xlabel('Energy [eV]')
     pl.ylabel(r'Mass Attenuation Coefficient [cm$^2$/g]')
Code Example #32
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)

# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)

###############################################################################
# Plot results
fig = pl.figure()
pl.title("Regularized covariance: likelihood and shrinkage coefficient")
pl.xlabel('Regularization parameter: shrinkage coefficient')
pl.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
pl.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")

pl.plot(pl.xlim(),
        2 * [loglik_real],
        '--r',
        label="Real covariance likelihood")

# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((pl.ylim()[1] - pl.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
pl.vlines(lw.shrinkage_,
Code Example #33
def showsounding(ab2, rhoa, resp=None, mn2=None, islog=True, xlab=None):
    """
        Display a sounding curve (rhoa over ab/2) and an additional response.
    """
    if xlab is None:
        xlab = r'$\rho_a$ in $\Omega$m'

    ab2a = N.asarray(ab2)
    rhoa = N.asarray(rhoa)
    if mn2 is None:
        if islog:
            l1 = P.loglog(rhoa, ab2, 'rx-', label='observed')
        else:
            l1 = P.semilogy(rhoa, ab2, 'rx-', label='observed')

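        # note: pyplot's hold() was removed in Matplotlib 3.0; on current versions these calls can simply be dropped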
        P.hold(True)
        if resp is not None:
            if islog:
                l2 = P.loglog(resp, ab2, 'bo-', label='simulated')
            else:
                l2 = P.semilogy(resp, ab2, 'bo-', label='simulated')

            P.legend((l1, l2), ('obs', 'sim'), loc=0)
    else:
        for unmi in N.unique(mn2):
            if islog:
                l1 = P.loglog(rhoa[mn2 == unmi],
                              ab2a[mn2 == unmi],
                              'rx-',
                              label='observed')
            else:
                l1 = P.semilogy(rhoa[mn2 == unmi],
                                ab2a[mn2 == unmi],
                                'rx-',
                                label='observed')

            P.hold(True)
            if resp is not None:
                l2 = P.loglog(resp[mn2 == unmi],
                              ab2a[mn2 == unmi],
                              'bo-',
                              label='simulated')
                P.legend((l1, l2), ('obs', 'sim'))

    P.axis('tight')
    P.ylim((max(ab2), min(ab2)))
    locs = P.yticks()[0]
    if len(locs) < 2:
        locs = N.hstack((min(ab2), locs, max(ab2)))
    else:
        locs[0] = max(locs[0], min(ab2))
        locs[-1] = min(locs[-1], max(ab2))

    a = []
    for l in locs:
        a.append('%g' % rndig(l))

    P.yticks(locs, a)

    locs = P.xticks()[0]

    a = []
    for l in locs:
        a.append('%g' % rndig(l))

    P.xticks(locs, a)

    P.grid(which='both')
    P.xlabel(xlab)
    P.ylabel('AB/2 in m')
    # P.legend()
    P.show()
    return
Code Example #34
File: convergence_test.py  Project: shyams2/Bolt
minmod_err = check_error(params)
minmod_con = np.polyfit(np.log10(N), np.log10(minmod_err), 1)

params.reconstruction_method_in_p = 'piecewise-constant'

pc_err = check_error(params)
pc_con = np.polyfit(np.log10(N), np.log10(pc_err), 1)

print(weno5_err)
print(ppm_err)
print(minmod_err)
print(pc_err)

print('Convergence with WENO5 reconstruction:', weno5_con[0])
print('Convergence with PPM reconstruction:', ppm_con[0])
print('Convergence with minmod reconstruction:', minmod_con[0])
print('Convergence with piecewise-constant reconstruction:', pc_con[0])

pl.loglog(N, weno5_err, '-o', label='WENO5')
pl.loglog(N, ppm_err, '-o', label='PPM')
pl.loglog(N, minmod_err, '-o', label='minmod')
pl.loglog(N, pc_err, '-o', label='Piecewise-Constant')
pl.loglog(N, 1e-3 / N, '--', color='black', label=r'$O(N^{-1})$')
pl.loglog(N, 1e-2 / N**2, '-.', color='black', label=r'$O(N^{-2})$')
pl.xlabel(r'$N$')
pl.ylabel('Error')
pl.legend()
pl.title('With Upwind-Flux Riemann Solver')
pl.savefig('convergenceplot.png')
Code Example #35
hf = lambdify(s, Vo, 'numpy')  # makes it as python function
v = hf(ss)
h2 = simplify(Vo)  # Simplifies it and returns reduced form
numer, denom = h2.as_numer_denom()  # Extracts numerator and denominator
num = poly(numer, s)
den = poly(
    denom,
    s)  # making them as polynomials to extract coefficients using all_coeff()

Hlp = sp.lti([float(i) for i in num.all_coeffs()],
             [float(i)
              for i in den.all_coeffs()])  # Impulse response of Lowpass filter
p.figure(1)
p.title("impulse reponse of lowpass filter")
p.xlabel(r'$s$', size=10)
p.loglog(ww, abs(v), lw=2)  # Frequency response of lowpass filter
p.grid(True)


def highpass(R1, R3, C1, C2, G,
             Vi):  # function to get transfer function of highpass circuit

    A = Matrix([[0, 0, 1, -1 / G], [0, G, -G, -1],
                [s * C2 * R3 / (1 + s * C2 * R3), -1, 0, 0],
                [(1 / R1) + s * C1, 1 / R3, 0, -1 / R1]])
    b = Matrix([0, 0, 0, Vi * s * C1])
    V = A.inv() * b
    return (A, b, V)


A, b, V = highpass(10000, 10000, 1e-9, 1e-9, 1.586, 1)
Code Example #36
                                   - n_nls * v2_nls**2
                                   - n_nls * v3_nls**2
                                  ) / n_nls

    n_analytic  = n_analytic(q1, params.t_final)
    v1_analytic = v1_analytic(q1, params.t_final)
    T_analytic  = T_analytic(q1, params.t_final)

    error_n[i]  = np.mean(abs(n_nls - n_analytic))
    error_v1[i] = np.mean(abs(v1_nls - v1_analytic))
    error_T[i]  = np.mean(abs(T_nls - T_analytic))

print('Errors Obtained:')
print('L1 norm of error for density:', error_n)
print('L1 norm of error for velocity:', error_v1)
print('L1 norm of error for temperature:', error_T)

print('\nConvergence Rates:')
print('Order of convergence for density:', np.polyfit(np.log10(N), np.log10(error_n), 1)[0])
print('Order of convergence for velocity:', np.polyfit(np.log10(N), np.log10(error_v1), 1)[0])
print('Order of convergence for temperature:', np.polyfit(np.log10(N), np.log10(error_T), 1)[0])

pl.loglog(N, error_n, '-o', label = 'Density')
pl.loglog(N, error_v1, '-o', label = 'Velocity')
pl.loglog(N, error_T, '-o', label = 'Temperature')
pl.loglog(N, error_n[0]*32**2/N**2, '--', color = 'black', label = r'$O(N^{-2})$')
pl.xlabel(r'$N$')
pl.ylabel('Error')
pl.legend()
pl.savefig('convergenceplot.png')
Code Example #37
def plot(n, mt, label_name, pattern):

    loglog(n, mt, pattern, basex=2, basey=2)
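The basex/basey keyword arguments were renamed in Matplotlib 3.3 and later removed; on current versions the equivalent call would be, roughly:

    loglog(n, mt, pattern, base=2)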
Code Example #38
File: FastHankel.py  Project: michaelJwilson/Spectre
#data = np.loadtxt('/disk1/mjw/HOD_MockRun/Data/SpectralDistortion/hi_k_hod_powerlaw.dat')
#pl.loglog(data[:,0], data[:,1])
##pl.loglog(data[:,0], data[:,2])

##pl.xlim(60., 80.)
##pl.ylim(1., 10.)

#pl.savefig('/disk1/mjw/HOD_MockRun/Plots/FastHank_hi_k_hod_powerlaw.pdf')

pl.clf()

data = np.loadtxt(
    '/disk1/mjw/HOD_MockRun/Data/SpectralDistortion/fftlog_pk_truncpowerlaw_4096_1.00e-10_1.00e+14.dat'
)

pl.loglog(data[:, 0], data[:, 1], 'g', label='mono, FFT log')
pl.loglog(data[:, 0], data[:, 2], 'y', label='quad, FFT log')
pl.loglog(data[:, 0], data[:, 3], 'y', label='hex, FFT log')
pl.loglog(data[:, 0], data[:, 4], 'k', label='clip mono, FFT log')
pl.loglog(data[:, 0], data[:, 5], 'k', label='clip quad, FFT log')

data = np.loadtxt(
    '/disk1/mjw/HOD_MockRun/Data/SpectralDistortion/2D_corrfn_multipoles_5.00.dat'
)
pl.loglog(data[:, 0], np.abs(data[:, 1]), 'k--', label='mono, 3D FFT')
pl.loglog(data[:, 0], np.abs(data[:, 2]), 'r--', label='quad, 3D FFT')
#pl.loglog(data[:,0], np.abs(data[:,3]), 'k--',   label=' hex, 3D FFT')

data = np.loadtxt(
    '/disk1/mjw/HOD_MockRun/Data/SpectralDistortion/2D_corrfn_Suppressedmultipoles_5.00.dat'
)
Code Example #39
def maximal_example(eta_list=array([0.001]), Nadapt=5, timet=1.,
                    period=2 * pi):
    ### CONSTANTS

    ### SETUP SOLUTION
    #testsol = '0.1*sin(50*x+2*pi*t/T)+atan(-0.1/(2*x - sin(5*y+2*pi*t/T)))';
    sx = Symbol('sx')
    sy = Symbol('sy')
    sT = Symbol('sT')
    st = Symbol('st')
    spi = Symbol('spi')
    testsol = 0.1 * pysin(50 * sx + 2 * spi * st / sT) + pyatan(
        -0.1, 2 * sx - pysin(5 * sy + 2 * spi * st / sT))
    ddtestsol = str(diff(testsol, sx, sx) + diff(testsol, sy, sy)).replace(
        'sx', 'x[0]').replace('sy', 'x[1]').replace('spi', 'pi')

    # replacing **P with pow(,P)
    ddtestsol = ddtestsol.replace("(2*x[0] - sin(5*x[1] + 2*pi*st/sT))**2",
                                  "pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.)")
    ddtestsol = ddtestsol.replace("cos(5*x[1] + 2*pi*st/sT)**2",
                                  "pow(cos(5*x[1] + 2*pi*st/sT),2.)")
    ddtestsol = ddtestsol.replace(
        "(pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.) + 0.01)**2",
        "pow((pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.) + 0.01),2.)")
    ddtestsol = ddtestsol.replace(
        "(1 + 0.01/pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.))**2",
        "pow(1 + 0.01/pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),2.),2.)")
    ddtestsol = ddtestsol.replace("(2*x[0] - sin(5*x[1] + 2*pi*st/sT))**5",
                                  "pow(2*x[0] - sin(5*x[1] + 2*pi*st/sT),5.)")
    #insert values
    ddtestsol = ddtestsol.replace('sT', str(period)).replace('st', str(timet))
    testsol = str(testsol).replace('sx', 'x[0]').replace('sy', 'x[1]').replace(
        'spi', 'pi').replace('sT', str(period)).replace('st', str(timet))
    ddtestsol = "-(" + ddtestsol + ")"

    error_list = []
    dof_list = []
    for eta in eta_list:
        meshsz = 40
        ### SETUP MESH
        #   mesh = RectangleMesh(0.4,-0.1,0.6,0.3,1*meshsz,1*meshsz,"left/right") #shock
        #   mesh = RectangleMesh(-0.75,-0.3,-0.3,0.5,1*meshsz,1*meshsz,"left/right") #waves
        mesh = RectangleMesh(-1.5, -0.25, 0.5, 0.75, 1 * meshsz, 1 * meshsz,
                             "left/right")  #shock+waves

        def boundary(x):
            return near(x[0],mesh.coordinates()[:,0].min()) or near(x[0],mesh.coordinates()[:,0].max()) \
            or near(x[1],mesh.coordinates()[:,1].min()) or near(x[1],mesh.coordinates()[:,1].max())

        # PERFORM ONE ADAPTATION ITERATION
        for iii in range(Nadapt):
            startTime = time()
            V = FunctionSpace(mesh, "CG", 2)
            dis = TrialFunction(V)
            dus = TestFunction(V)
            u = Function(V)
            #     R = interpolate(Expression(ddtestsol),V)
            a = inner(grad(dis), grad(dus)) * dx
            L = Expression(ddtestsol) * dus * dx  #
            bc = DirichletBC(V, Expression(testsol), boundary)
            solve(a == L, u, bc)
            soltime = time() - startTime

            startTime = time()
            H = metric_pnorm(u, eta, max_edge_ratio=50, CG0H=3, p=4)
            metricTime = time() - startTime
            if iii != Nadapt - 1:
                mesh = adapt(H)
                TadaptTime = time() - startTime
                L2error = errornorm(Expression(testsol),
                                    u,
                                    degree_rise=4,
                                    norm_type='L2')
                printstr = "%5.0f elements, %0.0e L2error, adapt took %0.0f %% of the total time, (%0.0f %% of which was the metric calculation)" \
                 % (mesh.num_cells(),L2error,TadaptTime/(TadaptTime+soltime)*100,metricTime/TadaptTime*100)
                if len(eta_list) == 1:
                    print(printstr)
            else:
                error_list.append(L2error)
                dof_list.append(len(u.vector().array()))
                print(printstr)

    if len(dof_list) > 1:
        dof_list = array(dof_list)
        error_list = array(error_list)
        figure()
        loglog(dof_list, error_list, '.b-', linewidth=2, markersize=16)
        xlabel('Degree of freedoms')
        ylabel('L2 error')


#    # PLOT MESH
#    figure()
    coords = mesh.coordinates().transpose()
    #    triplot(coords[0],coords[1],mesh.cells(),linewidth=0.1)
    #    #savefig('mesh.png',dpi=300) #savefig('mesh.eps');

    figure()  #solution
    testf = interpolate(Expression(testsol), FunctionSpace(mesh, 'CG', 1))
    vtx2dof = vertex_to_dof_map(FunctionSpace(mesh, "CG", 1))
    zz = testf.vector().array()[vtx2dof]
    hh = tricontourf(coords[0], coords[1], mesh.cells(), zz, 100)
    colorbar(hh)
    #savefig('solution.png',dpi=300) #savefig('solution.eps');

    figure()  #analytical solution
    testfe = interpolate(u, FunctionSpace(mesh, 'CG', 1))
    zz = testfe.vector().array()[vtx2dof]
    hh = tricontourf(coords[0], coords[1], mesh.cells(), zz, 100)
    colorbar(hh)
    #savefig('analyt.png',dpi=300) #savefig('analyt.eps');

    figure()  #error
    zz -= testf.vector().array()[vtx2dof]
    zz[zz == 1] -= 1e-16
    hh = tricontourf(mesh.coordinates()[:, 0],
                     mesh.coordinates()[:, 1],
                     mesh.cells(),
                     zz,
                     100,
                     cmap=get_cmap('binary'))
    colorbar(hh)

    hold('on')
    triplot(mesh.coordinates()[:, 0],
            mesh.coordinates()[:, 1],
            mesh.cells(),
            color='r',
            linewidth=0.5)
    hold('off')
    axis('equal')
    box('off')
    title('error')
    show()
Code Example #40
def analyze(
    paths
):  #as used with the parameter search, paths will have only one entry.  But keep consistent with interactive vaspoutCombineRunsExtData
    extpath = None
    useSym = False
    coloring = 'method'
    # coloring = 'indiv'
    doLegend = True
    doLabel = True
    smoothFactor = 2.0
    filter = '_'  #string must be in dir name to be included
    filter2 = None  #'Cu_1' #for single structures.  set to None if using filter1 only
    summaryPath = paths[0]
    #count the number of plots:
    iplot = 0
    maxCalcs = 0
    maxNk = 0
    methods = []
    for ipath, path in enumerate(paths):
        method = path.split('_')[-1].split('/')[0]
        methods.append(method)
        os.chdir(path)
        if filter2 == None:
            structs = sorted([
                d for d in os.listdir(os.getcwd())
                if os.path.isdir(d) and filter in d
            ])
        else:
            structs = sorted([
                d for d in os.listdir(os.getcwd())
                if os.path.isdir(d) and d == filter2
            ])
        for struct in structs:
            os.chdir(struct)
            iplot += 1
            calcs = sorted(
                [d for d in os.listdir(os.getcwd()) if os.path.isdir(d)])
            if len(calcs) > maxCalcs: maxCalcs = len(calcs)
            os.chdir(path)

    #external data is of the form extpath/atom_method/struct.csv.  The csv has energies vs nK
    if not extpath is None:
        os.chdir(extpath)
        atoms_methods = sorted([
            d for d in os.listdir(extpath) if os.path.isdir(d) and filter in d
        ])  # os.chdir(extpath)
        for atom_method in atoms_methods:
            atom = atom_method.split('_')[0]
            os.chdir(atom_method)
            os.system('rm -r .*lock*')
            for structfile in os.listdir(os.getcwd()):
                if atom not in structfile:
                    os.system('mv {} {}_{}'.format(
                        structfile, atom,
                        structfile))  #so that file has atom name at beginning
            if filter2 == None:
                structfiles = sorted([
                    d for d in os.listdir(os.getcwd())
                    if os.path.getsize(d) > 0
                ])
            else:
                structfiles = sorted([
                    d for d in os.listdir(os.getcwd())
                    if '_'.join(d.split('_')[:2]) == filter2
                    and os.path.getsize(d) > 0
                ])
            for structfile in structfiles:
                iplot += 1
                #count number of points in this structfile
                lines = readfile(structfile)
                if len(lines) > maxCalcs: maxCalcs = len(lines)
            os.chdir(extpath)

    nplots = iplot
    if nplots < len(paths): sys.exit('Stop.  Structures do not match filter')
    data = zeros(nplots,dtype = [('ID', 'S25'),('color', 'S15'),('method', 'S15'),\
                                 ('nDone','int32'),('nAtoms','int32'),('nops','int8'),\
                                 ('IBZvolcut','float'),('IBZvol','float'),\
                                 ('eners', '{}float'.format(maxCalcs)), ('errs', '{}float'.format(maxCalcs)),\
                                 ('nKs', '{}int16'.format(maxCalcs)),('ns', '{}int8'.format(maxCalcs))])
    # style.use('bmh')
    # for i, item in enumerate(rcParams['axes.prop_cycle']):
    #     colorsList.append(item['color'])
    style.use('fivethirtyeight')
    # for i, item in enumerate(rcParams['axes.prop_cycle'][:-2]):
    #     colorsList.append(item['color'])

    colorsList = [
        u'#30a2da', u'#fc4f30', u'#e5ae38', u'#6d904f', u'#8b8b8b', u'#348ABD',
        u'#A60628', u'#7A68A6', u'#467821', u'#D55E00', u'#CC79A7', u'#56B4E9',
        u'#009E73', u'#F0E442', u'#0072B2'
    ]

    colorsList = colorsList + ['b', 'm', 'y', 'c', 'k']
    rcParams.update({'figure.autolayout': True})
    rcParams['axes.facecolor'] = 'white'
    rcParams['axes.linewidth'] = 1.0
    rcParams['axes.edgecolor'] = 'black'  # axisbg=axescolor
    rcParams['savefig.facecolor'] = 'white'  # axisbg=axescolor
    rcParams['lines.markersize'] = 4.5
    #read all the data
    iplot = -1
    for ipath, path in enumerate(paths):  #my data
        tag = path.split('/')[-1][-7:]
        os.chdir(path)
        if filter2 == None:
            structs = sorted([
                d for d in os.listdir(os.getcwd())
                if os.path.isdir(d) and filter in d
            ])
        else:
            structs = sorted([
                d for d in os.listdir(os.getcwd())
                if os.path.isdir(d) and d == filter2
            ])
        nStructs = len(structs)
        #         print structs,path
        for istruct, struct in enumerate(structs):
            #         print 'test', istruct, struct
            #             print 'struct',struct
            os.chdir(struct)
            if coloring == 'indiv':
                #             if iplot < nplots -1:
                color = rgb2hex(cm.jet(1. * (iplot + 1) / float(nplots)))
    #             else:
    #                 color = 'k'
            elif coloring == 'method':
                #             color =  colorsList[ipath]
                color = None
            calcs = sorted([
                d for d in os.listdir(os.getcwd())
                if os.path.isdir(d) and os.path.exists('{}/OUTCAR'.format(d))
            ])
            energies = []
            nKs = []
            ns = []  #the base n of the run
            nDone = 0
            if useSym:
                try:
                    nops, IBZvolcut, IBZvol = readSym(calcs[0])
                except:
                    sys.exit('Stopping. readSym failed. Set useSym to False')
            for calc in calcs:
                if electronicConvergeFinish(calc):
                    ener = getEnergy(calc)  #in energy/atom
                    if not areEqual(ener, 0, 1e-5):
                        nDone += 1
                        energies.append(ener)
                        if 'vc' in path:
                            nK = getNkIBZ(calc, 'KPOINTS')

                        else:
                            nK = getNkIBZ(calc, 'IBZKPT')
                        if nK > maxNk: maxNk = nK
                        nKs.append(nK)
                        ns.append(int(calc.split('_')[-1]))
            #sort by increasing number of kpoints
            if len(energies) > 0:
                iplot += 1
                nKs = array(nKs)
                energies = array(energies)
                ns = array(ns)
                order = argsort(nKs)
                #         print 'struct',struct
                #         print 'energies',energies
                energies = energies[order]
                ns = ns[order]
                nKs = sort(nKs)
                eref = energies[
                    -1]  #the last energy of each struct is that of the most kpoints
                errs = abs(energies - eref) * 1000 + 1e-4  #now in meV
                data[iplot]['ID'] = '{} {}'.format(struct, tag)
                nAtoms = getNatoms('{}/POSCAR'.format(calc))
                data[iplot]['nAtoms'] = nAtoms
                if useSym:
                    data[iplot]['nops'] = nops
                    data[iplot]['IBZvolcut'] = IBZvolcut
                data[iplot]['nDone'] = nDone
                data[iplot]['eners'][:nDone] = energies
                data[iplot]['errs'][:nDone] = errs
                data[iplot]['nKs'][:nDone] = nKs
                data[iplot]['ns'][:nDone] = ns
                data[iplot]['color'] = color
                method = path.split('_')[-1].split('/')[0]
                data[iplot]['method'] = method
            os.chdir(path)
    # os.chdir(extpath)
    if not extpath is None:
        os.chdir(extpath)
        #         print; print atoms_methods
        for atom_method in atoms_methods:
            os.chdir(atom_method)
            if coloring == 'method':
                color = None
                if 'MP' in atom_method:
                    #                 color = colorsList[len(paths)]
                    method = 'MP'

                elif 'Mueller' in atom_method:
                    #                 color = colorsList[len(paths)+1]
                    method = 'Mueller'
                if method not in methods:
                    methods.append(method)
            if filter2 == None:
                structfiles = sorted([
                    d for d in os.listdir(os.getcwd())
                    if os.path.getsize(d) > 0
                ])
            else:
                structfiles = sorted([
                    d for d in os.listdir(os.getcwd())
                    if '_'.join(d.split('_')[:2]) == filter2
                    and os.path.getsize(d) > 0
                ])
            for structfile in structfiles:
                if useSym:
                    nops, IBZvolcut, nAtoms = copyData(structfile, data)
                if coloring == 'indiv':
                    if iplot < nplots - 1:
                        color = cm.jet(1. * (iplot + 1) / float(nplots))
                    else:
                        color = 'k'
                iplot += 1
                energies = []
                nKs = []
                lines = readfile(structfile)
                for line in lines:
                    nK = int(line.split('\t')[0])
                    if nK > maxNk: maxNk = nK
                    nKs.append(nK)
                    energies.append(-float(line.split('\t')[1].split('\r')[0]))
                nKs = array(nKs)
                energies = array(energies)
                nDone = len(energies)
                order = argsort(nKs)
                energies = energies[order]
                eref = energies[
                    -1]  #the last energy of each struct is that of the most kpoints
                nKs = sort(nKs)
                errs = abs(energies - eref) * 1000 + 1e-4  #now in meV
                struct = '_'.join(structfile.split('_')[:2])
                data[iplot]['ID'] = atom_method + struct
                data[iplot]['nAtoms'] = nAtoms
                if useSym:
                    data[iplot]['nops'] = nops
                    data[iplot]['IBZvolcut'] = IBZvolcut
                data[iplot]['nDone'] = len(energies)
                data[iplot]['eners'][:nDone] = energies
                data[iplot]['errs'][:nDone] = errs
                data[iplot]['nKs'][:nDone] = nKs
                data[iplot]['color'] = color
                data[iplot]['method'] = method
            os.chdir(extpath)
    nplots = iplot + 1

    lines = [' ID , nKIBZ , ener , err, nAtoms, nops,IBZcut\n']
    for iplot in range(nplots):
        n = data[iplot]['nDone']
        for icalc in range(n):  #data[iplot]['eners'][:n].tolist()
            lines.append('{}_n{},{},{:15.12f},{:15.12f},{},{},{}\n'.format(data[iplot]['ID'],\
              data[iplot]['ns'][icalc], data[iplot]['nKs'][icalc],\
              data[iplot]['eners'][icalc],data[iplot]['errs'][icalc],\
              data[iplot]['nAtoms'],data[iplot]['nops'],data[iplot]['IBZvolcut']))
    writefile(lines, '{}/summary.csv'.format(summaryPath))

    #plots
    if maxNk > 1:
        if filter[0] == '_': filter = ''  #labels can't begin with _
        #         plotTypes = ['linear','loglog', 'loglinear'];ylabels = ['Vasp error energy/atom (eV)','Error (meV)','Error (meV)']
        # print 'plot only loglog'
        plotTypes = ['loglog']
        ylabels = ['Error (meV)']
        #     plotTypes = []

        xtext = 'N k-points'

        for it, plotType in enumerate(plotTypes):
            fig = figure()
            ax1 = fig.add_subplot(111)
            xlabel(xtext)
            ylabel(ylabels[it])
            # title('Convergence vs mesh method')
            #ylim((1e-12,1e0))
            oldmethod = ''
            methods2 = []
            for iplot in range(nplots):
                labelStr = None
                n = data[iplot]['nDone']
                if coloring == 'method':
                    method = data[iplot]['method']
                    data[iplot]['color'] = colorsList[methods.index(method)]
                    if method != oldmethod and method not in methods2:
                        if doLabel:
                            labelStr = '{} {}'.format(filter,
                                                      data[iplot]['method'])
                        plotData(fig, summaryPath, data[iplot], n, plotType,
                                 filter, doLegend, labelStr)
                        oldmethod = method
                        labelStr = None
                        methods2.append(method)
                    else:
                        plotData(fig, summaryPath, data[iplot], n, plotType,
                                 filter, doLegend, labelStr)
                elif coloring == 'indiv':
                    if doLabel:
                        labelStr = '{} {}'.format(filter, data[iplot]['ID'])
                    plotData(fig, summaryPath, data[iplot], n, plotType,
                             filter, doLegend, labelStr)
        #Method averaging
        if coloring == 'method':
            #         print 'Averaging, plotting method errors'
            nbins = int(10 * ceil(log10(maxNk)))  # 10 bins per decade
            nKbins = array([(10.0**(1 / 10.0))**i for i in range(nbins)])
            fig = figure()
            ax1 = fig.add_subplot(111)
            xlabel('N k-points (smoothed by factor {})'.format(
                int(smoothFactor)))
            ylabel('Error (meV)')
            methodCostsLogs = []
            for im, method in enumerate(methods):
                methnKmax = 0
                binCounts = zeros(nbins, dtype=int32)
                binErrs = zeros(nbins, dtype=float)
                # "costs" relative to an excellent Si Monkhorst-Pack run, which has err = 10^3/nK^3 + 10^-3 meV
                costLogs = zeros(nbins, dtype=float)
                for iplot in range(nplots):
                    if data[iplot]['method'] == method:
                        for icalc in range(data[iplot]['nDone'] - 1):
                            nK = data[iplot]['nKs'][icalc]
                            if nK > methnKmax: methnKmax = nK
                            if nK > 1:
                                for ibin in range(nbins):
                                    if abs(log10(nK/nKbins[ibin])) <= log10(smoothFactor)\
                                      and nKbins[ibin]<= maxNk:
                                        binErrs[ibin] += data[iplot]['errs'][
                                            icalc]
                                        costLogs[ibin] += log10(
                                            data[iplot]['errs'][icalc] /
                                            (10**3 / (nK**3.0) + 0.001))
                                        binCounts[ibin] += 1
                mask = where(binCounts > 0)
                binErrs2 = binErrs[mask[0]]
                binCounts2 = binCounts[mask[0]]
                nKbins2 = nKbins[mask[0]]
                costLogs2 = costLogs[mask[0]]
                nbins2 = len(nKbins2)
                avgErrs = [
                    binErrs2[ibin] / binCounts2[ibin] for ibin in range(nbins2)
                ]
                avgcostLogs = [
                    costLogs2[ibin] / binCounts2[ibin]
                    for ibin in range(nbins2)
                ]
                avgcostLins = [10**avgcostLogs[ibin] for ibin in range(nbins2)]
                methodCostsLogs.append(mean(avgcostLogs))
                loglog(nKbins2,avgErrs,label = method,\
                      color = colorsList[im], marker = None)
                loglog(nKbins2,avgcostLins,label = None,\
                      color = colorsList[im], marker = None,linestyle=':')
                #         print 'Method',method, 'nKmax',methnKmax, 'avgLogCost', mean(avgcostLogs)
                legend(loc='lower left', prop={'size': 12})
                fig.savefig('{}/methodErrs'.format(summaryPath))
            close('all')
    if maxNk > 1:
        return [methodCostsLogs[0], mean(data['nDone'])]  # there is only one method when running this routine
    else:
        return [100, 0]
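The averaging step above smooths per-calculation errors onto logarithmically spaced k-point bins (10 per decade) before plotting. A minimal, self-contained sketch of that idea, with made-up (nK, error) samples standing in for the real convergence data:

import numpy as np
import matplotlib.pyplot as plt

np.random.seed(0)
nKs = np.random.randint(10, 5000, size=400)              # fake k-point counts
errs = 1e3 / nKs * np.random.lognormal(0.0, 0.5, 400)    # fake errors ~ nK^-1 with scatter

smoothFactor = 2.0
maxNk = nKs.max()
nbins = int(10 * np.ceil(np.log10(maxNk)))               # 10 bins per decade
nKbins = (10.0 ** (1 / 10.0)) ** np.arange(nbins)        # log-spaced bin centres

binErrs = np.zeros(nbins)
binCounts = np.zeros(nbins, dtype=int)
for nK, err in zip(nKs, errs):
    # each sample contributes to every bin centre within a factor smoothFactor of its nK
    hit = np.abs(np.log10(nK / nKbins)) <= np.log10(smoothFactor)
    binErrs[hit] += err
    binCounts[hit] += 1

mask = binCounts > 0
plt.loglog(nKbins[mask], binErrs[mask] / binCounts[mask], label='smoothed mean error')
plt.xlabel('N k-points')
plt.ylabel('Error (meV)')
plt.legend()
plt.show()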
Code example #41
        """
        sqrt_nu = self.delta_c() / self._growth / self.sigma_m(mass)
        if mass > 1e55:
            print mass, self.delta_c(), self.delta_c() / self._growth, \
                self._growth, sqrt_nu * sqrt_nu, self.sigma_m(mass)

        return sqrt_nu * sqrt_nu


if __name__ == '__main__':
    redshift = 0.0231
    cosmo = CosmologyFunctions(redshift)
    print '%.5f' % (cosmo.E(redshift))
    print '%.2e' % cosmo.rho_bar()
    print cosmo.omega_m()
    print '%.3e' % ((cosmo.E(redshift) / cosmo._h) *
                    cosmo.comoving_distance()**2 / cosmo._h**2)
    sys.exit()
    kmin = 1e-4
    kmax = 1e4
    dlnk = np.float64(np.log(kmax / kmin) / 100.)
    lnkarr = np.linspace(np.log(kmin), np.log(kmax), 100)
    karr = np.exp(lnkarr).astype(np.float64)
    #No little h
    pk_arr = np.array([cosmo.linear_power(k / cosmo._h) for k in karr]).astype(
        np.float64) / cosmo._h / cosmo._h / cosmo._h
    np.savetxt('pk_%.1f.txt' % redshift, np.transpose((karr, pk_arr)))
    pl.loglog(karr, pk_arr)
    pl.show()
Code example #42
            count=count+1
    Degree[i-1]=count
    count=0
#print Degree
#print max(Degree)
CountDegree=[0]*max(Degree)
probability=[0]*max(Degree)
for j in range(1, max(Degree)+1):
    for i in range(0, len(Degree)):
       if j==Degree[i]:
            CountDegree[j-1]=CountDegree[j-1]+1
#print CountDegree
for j in range(1, max(Degree)+1):
     probability[j-1]=float(CountDegree[j-1])/1682
#print probability
Maxdegree=[0]*max(Degree)
for i in range(1, max(Degree)+1):
    Maxdegree[i-1]=i
#print Maxdegree
print('Movielens Bottom')
font = {'family': 'serif',
        'color':  'darkblue',
        'weight': 'normal',
        'size': 16,
        }
plt.xlabel ('Degree Value',fontdict=font)
plt.ylabel('Probability (Pk)',fontdict=font)
plt.loglog(Maxdegree,probability,'.',color='m')
#plt.grid(True)
plt.show()
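The nested loops above tally how many nodes have each degree and divide by the hard-coded node count (1682) to get P(k). A more compact, hedged sketch of the same degree-distribution plot using numpy's bincount; the placeholder Degree array stands in for the one computed from the real graph:

import numpy as np
import matplotlib.pyplot as plt

Degree = np.random.randint(1, 60, size=1682)     # placeholder degrees; use the real array here

counts = np.bincount(Degree)                     # counts[k] = number of nodes with degree k
ks = np.arange(len(counts))
probability = counts / float(len(Degree))        # P(k), normalised by the number of nodes

nz = (counts > 0) & (ks > 0)                     # keep only degrees that occur and are plottable on a log axis
plt.loglog(ks[nz], probability[nz], '.', color='m')
plt.xlabel('Degree Value')
plt.ylabel('Probability (Pk)')
plt.show()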
Code example #43
File: stellar.py Project: michaelJwilson/LBGCMB
    '''

    ##  SM / HM relation for dropouts from Ishikawa+17.
    ##  Quoting log10(Mstar / Mhalo) with Mhalo in Msun / h.
    data = np.loadtxt('../dat/stellar/smhm.txt')

    mh = data[:, 0]

    ud = data[:, 1]
    gd = data[:, 2]
    rd = data[:, 3]

    print mh
    print ud
    print gd
    print rd

    pl.loglog(mh, (mh / params['h_100']) * 10.**ud, label=r'$u$-dropouts')
    pl.loglog(mh, (mh / params['h_100']) * 10.**gd, label=r'$g$-dropouts')
    pl.loglog(mh, (mh / params['h_100']) * 10.**rd, label=r'$r$-dropouts')

    pl.xlabel(r'$M_h \ [M_\odot / h]$')
    pl.ylabel(r'$M_\star \ [M_\odot]$')

    ## pl.ylim(5.e-4, 5.e-2)

    pl.legend()
    pl.savefig('../plots/stellar_mass.pdf', bbox_inches='tight')

    print('\n\nDone.\n\n')
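The only subtlety above is the unit bookkeeping: the table quotes log10(Mstar/Mhalo) against halo masses in Msun/h, so the snippet divides the halo mass by h before applying the ratio to get Mstar in plain Msun. A minimal sketch of that conversion as this snippet assumes it, with a hypothetical h and placeholder table rows:

import numpy as np

h_100 = 0.7                                    # assumed Hubble parameter (H0 / 100 km/s/Mpc)
mh_hunits = np.array([1e11, 1e12, 1e13])       # halo masses in Msun / h (placeholder values)
log_ratio = np.array([-2.3, -1.9, -2.1])       # log10(Mstar / Mhalo), as quoted in the table

mh_msun = mh_hunits / h_100                    # halo mass in plain Msun
mstar = mh_msun * 10.0 ** log_ratio            # stellar mass in Msun

for Mh, Ms in zip(mh_hunits, mstar):
    print('Mh = %.1e Msun/h  ->  Mstar = %.2e Msun' % (Mh, Ms))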
Code example #44
d1,d2 = int(d1),int(d2)

umags = pspec.keys(); umags.sort()
print 'Generating plots'
for cnt, umag in enumerate(umags):
    p.subplot(d1,d2,cnt + 1)
    fqs = pspec[umag].keys(); fqs.sort()
    if VERSUS_K:
        for i,fq in enumerate(fqs):
            color = 'kbgrcmy'[i%7]
            symbol = ['-','--','-.',':'][(i/7)%4]
            _ks,_pspec = pspec[umag][fq]['_ks'], pspec[umag][fq]['_pspec']
            if PLOT_KCUBE:
                # For k^3/(2pi^2)P(k), remember we've already divided by (2pi)^3
                #p.loglog(_ks, 4*n.pi*_ks**3*1e6*n.abs(_pspec), color+symbol, label=str(fq))
                p.loglog(_ks, 4*n.pi*_ks**3*1e6*n.abs(_pspec/1e-7*1.4e-5), color+symbol, label=str(fq))
            else:
                p.loglog(_ks, 1e6*n.abs(_pspec), color+symbol, label=str(fq))
        if PLOT_KCUBE:
            p.loglog(_ks, 4*n.pi*_ks**3*1e6*late_eor_pspec(_ks), 'k-')
        else:
            p.loglog(_ks, 1e6*late_eor_pspec(_ks), 'k-')
        p.xlabel(r'$k (h\ {\rm Mpc})^{-1}$')
        p.xlim(1e-2,2e0)
        #p.ylim(1e0,1e10)
        p.ylabel(r'${\rm mK}^2$')
        p.grid()
    else:
        pspec_key,ks_key = '_pspec','_ks'
        data = n.array([pspec[umag][fq][pspec_key] for fq in fqs])
        ks = n.log10(n.array([pspec[umag][fq][ks_key] for fq in fqs]))
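The commented line above refers to the dimensionless power spectrum, conventionally Δ²(k) = k³P(k)/(2π²); this snippet multiplies by 4πk³ instead because of the extra (2π)³ it has already divided out. A small sketch of the standard convention, with a made-up power-law P(k):

import numpy as np
import matplotlib.pyplot as plt

k = np.logspace(-2, 0.3, 100)              # h/Mpc, roughly the range plotted above
P = 1e3 * k ** -2.0                        # placeholder power spectrum in mK^2 (h^-1 Mpc)^3

delta2 = k ** 3 * P / (2.0 * np.pi ** 2)   # dimensionless power, mK^2

plt.loglog(k, delta2)
plt.xlabel(r'$k\ (h\,{\rm Mpc}^{-1})$')
plt.ylabel(r'$\Delta^2(k)\ [{\rm mK}^2]$')
plt.show()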
Code example #45
File: showradius.py Project: xzhang-awi/pism
parser.add_argument('infiles',
                    metavar='FILENAME',
                    nargs='+',
                    help='input file name (NetCDF)')
args = parser.parse_args()

plt.figure(figsize=(12, 6))

for j in range(len(args.infiles)):
    nc = netCDF.Dataset(args.infiles[j], "r")
    t = nc.variables["time"][:]
    ice_area_glacierized = nc.variables["ice_area_glacierized"][:]
    nc.close()
    plt.loglog(
        t[t > 2],
        np.sqrt(ice_area_glacierized[t > 2] / np.pi) * 100.0,
        linewidth=2.0,  # after t=2s, and in cm
        label=args.infiles[j])

if args.datafile != None:
    A = np.loadtxt(args.datafile)
    data_t = A[:, 0]
    data_rN = A[:, 1]
    plt.loglog(data_t, 100.0 * data_rN, 'ko', label='observed',
               ms=4)  # cm versus s

plt.legend(loc='upper left')
plt.xticks([1.0, 10.0, 100.0, 1000.0])
plt.yticks([1.0, 10.0])
plt.axis([1.0, 1000.0, 1.0, 30.0])
plt.xlabel("t  (s)", size=14)
Code example #46
################################################### 
# plot gene gains and losses vs fixations of SNPs #
###################################################

# clip for log scale
gene_hamming_matrix_gain = numpy.clip(gene_hamming_matrix_gain,1e-13,1e09)
gene_hamming_matrix_loss = numpy.clip(gene_hamming_matrix_loss,1e-13,1e09)

pylab.figure() 
pylab.xlabel('Num substitutions')
pylab.ylabel('Num gene differences')
pylab.ylim([1e-14,1e04])
pylab.xlim([1e-14,1e05])
pylab.title(species_name)

pylab.loglog(fraction_snp_difference[diff_subject_snp_idxs], gene_hamming_matrix_loss[diff_subject_gene_idxs] + gene_hamming_matrix_gain[diff_subject_gene_idxs],'ro')
pylab.loglog(fraction_snp_difference[time_pair_snp_idxs], gene_hamming_matrix_gain[time_pair_gene_idxs],'yo')
pylab.loglog(fraction_snp_difference[time_pair_snp_idxs], gene_hamming_matrix_loss[time_pair_gene_idxs],'bo')

pylab.legend(['diff subjects differences', 'gains','losses'],'upper right',prop={'size':6})

pylab.savefig('%s/%s_gene_gain_loss_vs_substitutions.png' % (parse_midas_data.analysis_directory, species_name), bbox_inches='tight', dpi=300)

### redo plot with unique time pairs (so that every point is iid)
pylab.figure() 
pylab.xlabel('Num substitutions')
pylab.ylabel('Num gene differences')
pylab.ylim([1e-14,1e04])
pylab.xlim([1e-14,1e05])
pylab.title(species_name)
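The clipping above is there because counts of zero cannot be drawn on a log axis; pushing them to a tiny positive floor keeps every pair visible near the axis edge instead of being masked away. A minimal sketch of the same trick with placeholder counts:

import numpy as np
import matplotlib.pyplot as plt

snp_diffs = np.array([0, 3, 10, 250, 4000], dtype=float)
gene_diffs = np.array([0, 0, 2, 40, 900], dtype=float)

# zeros are silently masked on a log scale; clip them to a tiny floor so the
# points still appear at the bottom/left edge of the axes
snp_clipped = np.clip(snp_diffs, 1e-13, None)
gene_clipped = np.clip(gene_diffs, 1e-13, None)

plt.loglog(snp_clipped, gene_clipped, 'ro')
plt.xlim(1e-14, 1e5)
plt.ylim(1e-14, 1e4)
plt.xlabel('Num substitutions')
plt.ylabel('Num gene differences')
plt.show()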
Code example #47
File: plot_sigloss.py Project: karakundert/capo
                    right=0.95)

#Plot 2
p.figure(1)
pklo, pkhi = 1e-6, 1e18  #1e2,1e14
ax2 = p.subplot(gs[4])  #used to be 2
#p.loglog(pIs, pCs, 'k.')
p.setp(ax2.get_yticklabels(),
       visible=False)  #uncomment if no left-hand P(k) plot
p.errorbar(pIs,
           n.abs(pCs),
           xerr=2 * pIs_err,
           yerr=2 * pCs_err,
           capsize=0,
           fmt='k.')
p.loglog([pklo, pkhi], [pklo, pkhi], 'k-')
p.xlim(pklo, pkhi)
p.ylim(pklo, pkhi)
p.xlabel(r'$P_{\rm in}(k)\ [{\rm mK}^2\ (h^{-1}\ {\rm Mpc})^3]$', fontsize=14)
p.ylabel(r'$P_{\rm out}(k)\ [{\rm mK}^2\ (h^{-1}\ {\rm Mpc})^3]$', fontsize=14)
p.grid()
pkup = max(n.abs(pCvs))
pkdn = min(n.abs(pCvs))
p.fill_between([pklo, pkhi], [pkdn, pkdn], [pkup, pkup],
               facecolor='gray',
               edgecolor='gray')
"""
for kpl,pk,err in zip(kpls,pks,errs):
    #p.([pklo,pkhi], [pk,pk], 'r')
    pkup = max(pk+err,1e-6)
    pkdn = max(pk-err,1e-6)
Code example #48
pl.plot(bg.field('lambda'), bcg.rich, 'bo', label='Full Sample', alpha=1)
#pl.plot(bg[ok].field('lambda'),bcg[ok].rich,'r.',label = 'good center lambda')
pl.plot(bg[ok].field('lambda'),
        bcg[ok].rich,
        'ro',
        label='Strict Model Definition')

#pl.plot(bg[bad].field('lambda'),bcg[bad].rich,'r.',label = 'bad center lambda')

#pl.plot(bg[ok].field('lambda'),bcg[ok].rich,'r.',label ='aic2 <= aic1',alpha= 1)

pl.xlabel('Lambda')
pl.ylabel('GMBCG Richness')
pl.legend(loc='best')
pl.loglog()  # no data: just switch both axes of the current figure to log scale
pl.ylim(1, 200)

#----separation -------
wrongRem = (bcg.ccolor < bcg.mu1) * good
correctRem = (bcg.ccolor < bcg.mu1) * bad
Nwrong = len(bcg[wrongRem])
Nright = len(bcg[correctRem])
Nbad = len(bcg[bad])

pl.hist(bcg[good].ccolor - bcg[good].mu1,
        bins=20,
        normed=True,
        label='good center',
        alpha=0.3)
pl.hist(bcg[bad].ccolor - bcg[bad].mu1,
Code example #49
File: dc1dsmooth.py Project: ziogibom/gimli
f = g.DC1dRhoModelling(thk, ab2, mn2)

inv = g.RInversion(rhoa, f, True)
model = g.RVector(nlay, P.median(rhoa))
inv.setModel(model)
inv.setTransData(transRhoa)
inv.setTransModel(transRho)
inv.setRelativeError(errPerc / 100.0)
inv.setLambda(lam)
model = inv.run()

model2 = g.RVector(nlay, P.median(rhoa))
inv.setModel(model2)
inv.setBlockyModel(True)
model2 = inv.run()

fig = P.figure(1)
fig.clf()
ax1 = fig.add_subplot(121)
P.loglog(rhoa, ab2, 'rx-', inv.response(), ab2, 'b-')
P.axis('tight')
P.ylim((max(ab2), min(ab2)))
P.grid(which='both')
P.xlabel(r"\rho_a in \Omegam")
P.ylabel("AB/2 in m")
P.legend(("measured", "fitted"), loc="upper left")
ax2 = fig.add_subplot(122)
draw1dmodel(model, thk, r'\rho in \Omega m')
draw1dmodel(model2, thk, r'\rho in \Omega m')
draw1dmodel([100, 500, 20, 1000], [0.5, 3.5, 6])
P.show()
Code example #50
    pass

if calc_greensfcn_accel:
    plotargs.extend(
        [t_heatsim2 + dt_heatsim2 / 2.0, Tg_accel[1:, measj, measi], 'o-'])
    legendargs.append('Integral of Green\'s functions (accelerated)')
    pass

if load_comsol:
    plotargs.extend([t_comsol, T_comsol, '*-.'])
    legendargs.append('COMSOL')
    pass

pl.figure(1)
pl.clf()
pl.loglog(*plotargs, markersize=8, linewidth=4, markeredgewidth=1)
pl.legend(legendargs, fontsize=12)
pl.xlabel('Time (s)')
pl.ylabel('Temperature (K)')
pl.grid()
pl.axis([1e-2, 20, 5, 100])
pl.savefig("/tmp/flatbottomhole.png", dpi=300)
#pl.show()

pl.figure(2)
pl.clf()
pl.loglog(*plotargs, markersize=8, linewidth=4, markeredgewidth=1)
pl.legend(legendargs, fontsize=12)
pl.xlabel('Time (s)')
pl.ylabel('Temperature (K)')
pl.grid()
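The plotargs list built above is the usual trick of collecting several (x, y, fmt) triples and handing them to one plotting call via argument unpacking, so optional curves (Green's function, COMSOL, ...) can be switched on and off by flags. A stripped-down sketch of the pattern with invented flags and data:

import numpy as np
import matplotlib.pyplot as plt

t = np.logspace(-2, np.log10(20.0), 50)

plotargs = []
legendargs = []

have_model_a = True           # stand-ins for the calc_* / load_* flags above
have_model_b = False

if have_model_a:
    plotargs.extend([t, 80.0 * t ** -0.5, 'o-'])
    legendargs.append('model A')
if have_model_b:
    plotargs.extend([t, 60.0 * t ** -0.5, '*-.'])
    legendargs.append('model B')

plt.loglog(*plotargs, markersize=8, linewidth=2)   # every (x, y, fmt) triple in one call
plt.legend(legendargs)
plt.xlabel('Time (s)')
plt.ylabel('Temperature (K)')
plt.show()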
Code example #51
File: anderson.py Project: wangxinzhi0/remodnav
def remodnav_on_anderson_mainseq(superimp="trials"):
    """ by default will make main sequences for each trial/file.
    superimp = "stimulus" for superimposed main sequences of each stimulus 
    type"""
    for stimtype in ('img', 'dots', 'video'):
        #for stimtype in ('img', 'video'):
        if superimp == "stimulus":
            pl.figure(figsize=(6, 4))

        coder = 'MN'
        print(stimtype, coder)
        fixation_durations = []
        saccade_durations = []
        pso_durations = []
        purs_durations = []
        for fname in labeled_files[stimtype]:
            data, target_labels, target_events, px2deg, sr = load_anderson(
                stimtype, fname.format(coder))

            clf = EyegazeClassifier(
                px2deg=px2deg,
                sampling_rate=sr,
                pursuit_velthresh=5.,
                noise_factor=3.0,
                lowpass_cutoff_freq=10.0,
            )
            p = clf.preproc(data)
            events = clf(p)
            events = pd.DataFrame(events)
            saccades = events[events['label'] == 'SACC']
            isaccades = events[events['label'] == 'ISAC']
            hvpso = events[(events['label'] == 'HPSO') |
                           (events['label'] == 'IHPS')]
            lvpso = events[(events['label'] == 'LPSO') |
                           (events['label'] == 'ILPS')]

            if superimp == "trials":
                pl.figure(figsize=(6, 4))
            for ev, sym, color, label in (
                    (saccades, '.', 'xkcd:green grey', 'Segment defining saccade'),
                    (isaccades, '.', 'xkcd:dark olive', 'Saccades'),
                    (hvpso, '+', 'xkcd:pinkish', 'High velocity PSOs'),
                    (lvpso, '+', 'xkcd:wine', 'PSOs'))[::-1]:
                pl.loglog(ev['amp'],
                          ev['peak_vel'],
                          sym,
                          color=color,
                          alpha=1,
                          lw=1,
                          label=label)

            pl.ylim((10.0, 1000))  #previously args.max_vel, put this back in
            pl.xlim((0.01, 40.0))
            pl.legend(loc=4)
            pl.ylabel('peak velocities (deg/s)')
            pl.xlabel('amplitude (deg)')
            if superimp == "trials":
                pl.savefig('{}_{}_remodnav_on_testdata_mainseq.svg'.format(
                    stimtype, fname[0:15]),
                           bbox_inches='tight',
                           format='svg')

        if superimp == "stimulus":
            pl.savefig('{}_remodnav_on_testdata_superimp_mainseq.svg'.format(
                stimtype, fname[0:15]),
                       bbox_inches='tight',
                       format='svg')
        pl.close('all')
Code example #52
File: plot_phase.py Project: hasanmoudud/darcoda
    norm = unit_d / mp

    dd = []
    tk = []
    h2 = []

    l = 0
    ddMax = 0.
    offSet = 0.
    for line in f:
        values = line.split()
        dd.append(float(values[6]))
        tk.append(float(values[10]))
        tk[l] = tk[l] / dd[l] * (unit_l / unit_t)**2 * mp / kb
        h2.append(float(values[19]))
        dd[l] *= norm
        l += 1
    f.close()
    filename = "phase" + number + ".png"

    print numpy.min(dd), numpy.max(dd)
    print numpy.min(tk), numpy.max(tk)

    pylab.figure()
    pylab.ylabel("T/mu")
    pylab.xlabel("#/cc")
    CP = pylab.loglog(dd, tk, '.', markersize=0.5)
    pylab.axis([1e-1, 1e6, 1e1, 1e4])
    pylab.savefig(filename)
Code example #53
File: anderson.py Project: wangxinzhi0/remodnav
def preproc_on_anderson_mainseq():
    #for sequentially making main sequences of all the available files
    for stimtype in ('img', 'dots', 'video'):
        #for stimtype in ('img', 'video'):
        for coder in ('MN', 'RA'):
            print(stimtype, coder)
            fixation_durations = []
            saccade_durations = []
            pso_durations = []
            purs_durations = []
            for fname in labeled_files[stimtype]:
                data, target_labels, target_events, px2deg, sr = load_anderson(
                    stimtype, fname.format(coder))

                clf = EyegazeClassifier(
                    px2deg=px2deg,
                    sampling_rate=sr,
                    pursuit_velthresh=5.,
                    noise_factor=3.0,
                    lowpass_cutoff_freq=10.0,
                )
                pproc = clf.preproc(data)
                pproc_df = pd.DataFrame(pproc)
                target_events_df = pd.DataFrame(target_events)

                saccade_events = target_events_df[target_events_df.label ==
                                                  "SACC"]
                peak_vels = []
                amp = []
                for row in target_events_df.itertuples():
                    peak_vels.append(
                        pproc_df.vel.loc[row.start_index:row.end_index].max())
                    amp.append ((((pproc_df.x.loc[row.start_index] - pproc_df.x.loc[row.end_index]) ** 2 + \
                    (pproc_df.y.loc[row.start_index] - pproc_df.y.loc[row.end_index]) ** 2) ** 0.5) * px2deg)

                peaks_amps_df = pd.DataFrame({
                    'peak_vels': peak_vels,
                    'amp': amp
                })
                target_events_df = pd.concat([target_events_df, peaks_amps_df],
                                             axis=1)

                saccades = target_events_df[target_events_df['label'] ==
                                            'SACC']
                pso = target_events_df[target_events_df['label'] == 'PSO']

                pl.figure(figsize=(6, 4))
                for ev, sym, color, label in ((saccades, '.', 'black',
                                               'saccades'),
                                              (pso, '+', 'xkcd:burnt sienna',
                                               'PSOs'))[::-1]:
                    pl.loglog(ev['amp'],
                              ev['peak_vels'],
                              sym,
                              color=color,
                              alpha=.2,
                              lw=1,
                              label=label)

                pl.ylim(
                    (10.0, 1000))  #previously args.max_vel, put this back in
                pl.xlim((0.01, 40.0))
                pl.legend(loc=4)
                pl.ylabel('peak velocities (deg/s)')
                pl.xlabel('amplitude (deg)')
                pl.tick_params(which='both', direction='in')
                pl.savefig('{}_{}_{}_mainseq_preproc_on_anderson.svg'.format(
                    stimtype, coder, fname[0:15]),
                           bbox_inches='tight',
                           format='svg')

                print(len(peak_vels))
                print(len(amp))
Code example #54
File: anderson.py Project: wangxinzhi0/remodnav
def preproc_on_anderson_mainseq_superimp(superimp="coders"):
    """ by default will make main sequences for each coder for each file
    "stimulus" for superimposed main sequences of each stimulus type"""
    #for making main sequences with Human coders superimposed on one another

    for stimtype in ('img', 'dots', 'video'):
        #for stimtype in ('img', 'video'):
        if superimp == "stimulus":
            pl.figure(figsize=(6, 4))

        for coder in ('MN', 'RA'):
            print(stimtype, coder)
            fixation_durations = []
            saccade_durations = []
            pso_durations = []
            purs_durations = []
            for fname in labeled_files[stimtype]:
                data, target_labels, target_events, px2deg, sr = load_anderson(  #change to load_anderson
                    stimtype, fname.format(coder))

                clf = EyegazeClassifier(
                    px2deg=px2deg,
                    sampling_rate=sr,
                    pursuit_velthresh=5.,
                    noise_factor=3.0,
                    lowpass_cutoff_freq=10.0,
                )
                pproc = clf.preproc(data)
                pproc_df = pd.DataFrame(pproc)
                target_events_df = pd.DataFrame(target_events)

                saccade_events = target_events_df[target_events_df.label ==
                                                  "SACC"]
                peak_vels = []
                amp = []
                for row in target_events_df.itertuples():
                    peak_vels.append(
                        pproc_df.vel.loc[row.start_index:row.end_index].max())
                    amp.append ((((pproc_df.x.loc[row.start_index] - pproc_df.x.loc[row.end_index]) ** 2 + \
                    (pproc_df.y.loc[row.start_index] - pproc_df.y.loc[row.end_index]) ** 2) ** 0.5) * px2deg)

                peaks_amps_df = pd.DataFrame({
                    'peak_vels': peak_vels,
                    'amp': amp
                })
                target_events_df = pd.concat([target_events_df, peaks_amps_df],
                                             axis=1)

                saccades = target_events_df[target_events_df['label'] ==
                                            'SACC']
                pso = target_events_df[target_events_df['label'] == 'PSO']

                if coder == 'MN':
                    if superimp == "coders":
                        pl.figure(figsize=(6, 4))
                    for ev, sym, color, label in ((saccades, '.', 'red',
                                                   'saccades'),
                                                  (pso, '+', 'red',
                                                   'PSOs'))[::-1]:
                        pl.loglog(ev['amp'],
                                  ev['peak_vels'],
                                  sym,
                                  color=color,
                                  alpha=1,
                                  lw=1,
                                  label=label)

                    pl.ylim((
                        10.0,
                        1000))  #TODO previously args.max_vel, put this back in
                    pl.xlim((0.01, 40.0))
                    pl.legend(loc=4)
                    pl.ylabel('peak velocities (deg/s)')
                    pl.xlabel('amplitude (deg)')
                    pl.tick_params(which='both', direction='in')

                    superimp_figure_index = 1

                elif coder == 'RA':
                    if superimp == "coders":
                        pl.figure(superimp_figure_index)
                    for ev, sym, color, label in ((saccades, '.', 'blue',
                                                   'saccades'),
                                                  (pso, '+', 'blue',
                                                   'PSOs'))[::-1]:
                        pl.loglog(ev['amp'],
                                  ev['peak_vels'],
                                  sym,
                                  color=color,
                                  alpha=1,
                                  lw=1,
                                  label=label)
                    if superimp == "coders":
                        pl.savefig(
                            '{}_{}_{}_mainseq_preproc_on_anderson_superimposed.svg'
                            .format(stimtype, coder, fname[0:15]),
                            bbox_inches='tight',
                            format='svg')

                    superimp_figure_index += 1

                print(len(peak_vels))
                print(len(amp))
        if superimp == "stimulus":
            pl.savefig(
                '{}_mainseq_preproc_on_anderson_superimposed.svg'.format(
                    stimtype),
                bbox_inches='tight',
                format='svg')

        # Closing set of plots made for each stimulus type
        pl.close('all')
Code example #55
File: speedup.py Project: devin-petersohn/docs
]

r_indel = []
r_vc_no_mask = []

for i in range(len(indel)):

    r_indel.append(1.0 / indel[i])
    r_vc_no_mask.append(1.0 / vc_no_mask[i])

figure()

title("Runtime vs. number of cores for high coverage NA12878")
ylabel("Runtime (minutes)")
xlabel("Number of cores")
loglog(cores, r_indel, color='orange', basex=2, basey=2, label="Realignment")
loglog(ideal,
       r_indel_ideal,
       color='orange',
       linestyle='dotted',
       basex=2,
       basey=2)
loglog(cores, r_vc_no_mask, color='blue', basex=2, basey=2, label="Genotyping")
loglog(ideal,
       r_vc_no_mask_ideal,
       color='blue',
       linestyle='dotted',
       basex=2,
       basey=2)
legend(loc=2)
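The basex=/basey= keywords used above were the spelling in older matplotlib; releases from 3.3 on replaced them with a single base keyword (the old names were later removed). A hedged, version-dependent sketch of the same base-2 scaling plot with illustrative timings:

import matplotlib.pyplot as plt

cores = [16, 32, 64, 128, 256, 512, 1024]
runtime = [480.0, 250.0, 130.0, 70.0, 40.0, 25.0, 18.0]   # minutes, illustrative only
ideal = [runtime[0] * cores[0] / c for c in cores]        # perfect strong scaling

plt.figure()
plt.title('Runtime vs. number of cores')
plt.loglog(cores, runtime, color='orange', base=2, label='Measured')     # matplotlib >= 3.3
plt.loglog(cores, ideal, color='orange', linestyle='dotted', base=2)
plt.xlabel('Number of cores')
plt.ylabel('Runtime (minutes)')
plt.legend(loc=2)
plt.show()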
Code example #56
mse = [np.linalg.lstsq(M, f[:, i])[0]
       for i in range(9)]  # Calculating The Least Squares Solution
mse = np.asarray(mse)  # Converting The Input Into An Array
errorA = abs(mse[:, 0] - 1.05)  # Absolute Value Of Error
errorB = abs(mse[:, 1] + 0.105)  # Absolute Value Of Error
pylab.plot(sigma, errorA, 'ro--', label='Aerr')  # Plotting errorA
pylab.plot(sigma, errorB, 'go--', label='Berr')  # Plotting errorB
pylab.legend(
    loc='upper left')  # Placing A Legend On The Top Left Corner Of The Graph
pylab.xlabel(r'Noice Standard Deviation$\rightarrow$',
             fontsize=15)  # Setting The Label For The x-axis
pylab.ylabel(r'MS error$\rightarrow$',
             fontsize=15)  # Setting The Label For The y-axis
pylab.grid(True)  # Displaying The Grid
pylab.show()  # Displaying The Figure

pylab.figure(4)  # Creating A New Figure
pylab.loglog(sigma, errorA, 'ro',
             label='Aerr')  # Making A Plot With Log Scaling On Both Axis
pylab.stem(sigma, errorA, '-ro')  # Creating A Stem Plot
pylab.loglog(sigma, errorB, 'go',
             label='Berr')  # Making A Plot With Log Scaling On Both Axis
pylab.stem(sigma, errorB, '-go')  # Creating A Stem Plot
pylab.legend(
    loc='upper left')  # Placing A Legend On The Top Left Corner Of The Graph
pylab.xlabel(r'$\sigma_{n}\rightarrow$',
             fontsize=15)  # Setting The Label For The x-axis
pylab.ylabel(r'MS error$\rightarrow$',
             fontsize=15)  # Setting The Label For The y-axis
pylab.grid(False)  # Removing The Grid
pylab.show()  # Displaying The Figure
Code example #57
File: convergence_test_2.py Project: mchandra/Bolt
# ppm_err = check_error(params)
# ppm_con = np.polyfit(np.log10(N), np.log10(ppm_err), 1)

# params.reconstruction_method_in_p = 'minmod'

# minmod_err = check_error(params)
# minmod_con = np.polyfit(np.log10(N), np.log10(minmod_err), 1)

# params.reconstruction_method_in_p = 'piecewise-constant'

# pc_err = check_error(params)
# pc_con = np.polyfit(np.log10(N), np.log10(pc_err), 1)

print('Error with WENO5 reconstruction:', weno5_err)
print('Convergence with WENO5 reconstruction:', weno5_con[0])
# print('Convergence with PPM reconstruction:', ppm_con[0])
# print('Convergence with minmod reconstruction:', minmod_con[0])
# print('Convergence with piecewise-constant reconstruction:', pc_con[0])

pl.loglog(N, weno5_err, '-o',label = 'Numerical')
# pl.loglog(N, ppm_err, '-o', label = 'PPM')
# pl.loglog(N, minmod_err, '-o', label = 'minmod')
# pl.loglog(N, pc_err, '-o', label = 'Piecewise-Constant')
# pl.loglog(N, 1e-3/N, '--', color = 'black', label = r'$O(N^{-1})$')
pl.loglog(N, weno5_err[0] * 32**2/N**2, '--', color = 'black', label = r'$O(N^{-2})$')
pl.xlabel(r'$N$')
pl.ylabel('Error')
pl.legend()
# pl.title('With Upwind-Flux Riemann Solver')
pl.savefig('convergenceplot.png')
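The commented-out blocks above all follow the same recipe: measure the error at several resolutions, fit a straight line to log10(err) vs log10(N), and read the convergence order off the slope. A minimal sketch of that fit, with synthetic second-order errors standing in for check_error:

import numpy as np
import matplotlib.pyplot as plt

N = np.array([32, 64, 128, 256, 512], dtype=float)
err = 2.0e-3 * (32.0 / N) ** 2                     # synthetic errors decaying as N^-2

con = np.polyfit(np.log10(N), np.log10(err), 1)    # slope ~ -(order of convergence)
print('Observed order of convergence: %.2f' % -con[0])

plt.loglog(N, err, '-o', label='Numerical')
plt.loglog(N, err[0] * (N[0] / N) ** 2.0, '--', color='black', label=r'$O(N^{-2})$')
plt.xlabel(r'$N$')
plt.ylabel('Error')
plt.legend()
plt.show()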
Code example #58
    x = sl.PartialPivot(A[i], v[i])
    stop = time()
    t_duration_2.append(stop - start)
    v_sol = np.dot(A[i], x)
    error_2.append(np.mean(np.abs(v[i] - v_sol)))

#LU decomp execution and timer
for i in range(M):
    start = time()
    x = np.linalg.solve(A[i], v[i])
    stop = time()
    t_duration_3.append(stop - start)
    v_sol = np.dot(A[i], x)
    error_3.append(np.mean(np.abs(v[i] - v_sol)))

#plot the error and time data of each method on a log-log plot
plt.figure()
plt.title("Performance of Linear System Solution Methods")
plt.ylabel("Ave error of solution, $log(mean(|v_{computed}-v_{actual}|)$")
plt.xlabel("Computation time for solution, $log(t)$")
plt.grid()
plt.loglog()  # no data: just put both axes of the current figure on a log scale
plt.plot(t_duration_1,
         error_1,
         '.',
         label='Gaussian Elimination',
         color='blue')
plt.plot(t_duration_2, error_2, '.', label='Partial Pivoting', color='orange')
plt.plot(t_duration_3, error_3, '.', label='LU Decomposition', color='red')
plt.legend()
plt.savefig("lin_sol.png")
Code example #59
File: beam_src_vs_ha.py Project: nkern/capo
        else:
            bp_fit = n.polyval(bp_cal[filetype], afreqs).clip(.1, 10)**2
        spec /= bp_fit

        src_poly = n.polyfit(n.log10(afreqs / src.mfreq), n.log10(spec), deg=1)
        n.set_printoptions(threshold=n.nan)
        print 'bp =', list(bp_poly)
        print "'%s':" % src.src_name + "{ 'jys':10**%f, 'index':  %f , }," % (
            src_poly[-1], src_poly[-2])
        print 'RMS residual:', n.sqrt(
            n.average(
                (spec -
                 10**n.polyval(src_poly, n.log10(afreqs / src.mfreq)))**2))
        if n.all(spec <= 0): continue

        if not opts.quiet:
            p.loglog(afreqs, spec, color + '.', label='Measured')
            p.loglog(afreqs,
                     10**n.polyval(src_poly, n.log10(afreqs / src.mfreq)),
                     color + '-',
                     label='Fit Power Law')
    if not opts.quiet:
        #p.loglog(afreqs, src.jys, color+':', label='%f, %s' % (src._jys, str(src.index)))
        p.xticks(n.arange(.1, .2, .02), ['100', '120', '140', '160', '180'])
        p.xlim(afreqs[0], afreqs[-1])
        p.ylim(3, 3e3)
        p.grid()
        p.title(src.src_name)
        #p.legend()
p.show()
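The fit above turns a power law S(nu) = S0 (nu/nu0)^alpha into a straight line by working in log10, so np.polyfit with deg=1 returns [alpha, log10(S0)]. A small sketch of that fit on a fake spectrum; mfreq plays the role of src.mfreq and the flux values are invented:

import numpy as np
import matplotlib.pyplot as plt

mfreq = 0.150                                           # assumed reference frequency (GHz)
afreqs = np.linspace(0.120, 0.180, 30)
spec = 40.0 * (afreqs / mfreq) ** -0.8                  # fake spectrum: S0 = 40 Jy, alpha = -0.8
spec *= 10 ** (0.02 * np.random.randn(afreqs.size))     # a little multiplicative noise

src_poly = np.polyfit(np.log10(afreqs / mfreq), np.log10(spec), deg=1)
alpha, logS0 = src_poly[0], src_poly[1]
print('jys = %.2f, index = %.3f' % (10 ** logS0, alpha))

plt.loglog(afreqs, spec, 'k.', label='Measured')
plt.loglog(afreqs, 10 ** np.polyval(src_poly, np.log10(afreqs / mfreq)), 'k-',
           label='Fit Power Law')
plt.xlabel('Frequency (GHz)')
plt.ylabel('Flux density (Jy)')
plt.legend()
plt.show()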
Code example #60
def figs(event, fitnum, fignum):
    #Define class
    class FixedOrderFormatter(plt.matplotlib.ticker.ScalarFormatter):
        """Formats axis ticks using scientific notation with a constant order of magnitude"""
        def __init__(self, order_of_mag=0, useOffset=True, useMathText=False):
            self._order_of_mag = order_of_mag
            plt.matplotlib.ticker.ScalarFormatter.__init__(
                self, useOffset=useOffset, useMathText=useMathText)

        def _set_orderOfMagnitude(self, range):
            """Over-riding this to avoid having orderOfMagnitude reset elsewhere"""
            self.orderOfMagnitude = self._order_of_mag

    print('MARK: ' + time.ctime() + ' : Plot Figures')

    obj = event.eventname
    fit = event.fit[fitnum]
    if hasattr(fit, 'stepsize'):
        stepsize = fit.stepsize
    elif hasattr(event.params, 'stepsize'):
        stepsize = event.params.stepsize
    else:
        stepsize = 100
    period = round(event.period, 2)
    nbins = event.params.nbins
    width = [8, 8, 8, 8, 8, 8, 8, 8, 8]
    height = [6, 6, 6, 7.5, 6, 4, 8, 6, 6]
    numfigs = 10
    #binst, binend   = min(phase), max(phase)
    #binsz           = (binend - binst) / nbins
    #binphase        = np.arange(nbins+1) * binsz + binst

    #LOAD allparams FOR CURRENT FIT
    try:
        print('Loading ' + fit.allparamsfile)
        fit.allparams = np.load(fit.allparamsfile)
    except:
        print('Could not load allparams file.')

    #BIN DATA
    fit.binres = np.zeros(nbins)
    fit.binresstd = np.zeros(nbins)
    flatline = np.zeros(nbins)
    for j in range(nbins):
        start = int(1. * j * fit.nobj / nbins)
        end = int(1. * (j + 1) * fit.nobj / nbins)
        fit.binres[j] = np.mean(fit.residuals[start:end])
        fit.binresstd[j] = np.std(fit.residuals[start:end])

    #Assign abscissa time unit (orbits or days)
    if hasattr(event.params, 'timeunit') == False:
        event.params.timeunit = 'orbits'
    if event.params.timeunit == 'orbits':
        tuall = event.phase.flatten()  #Include all frames
        timeunit = fit.phase  #Use only clipped frames
        timeunituc = fit.phaseuc
        abscissa = fit.binphase  #Binned clipped frames
        abscissauc = fit.binphaseuc  #Binned unclipped
        xlabel = 'Orbital Phase (' + str(round(period, 2)) + '-day period)'
    elif event.params.timeunit == 'days-utc':
        tuall = event.bjdutc.flatten() - event.params.tuoffset
        timeunit = fit.bjdutc - event.params.tuoffset
        timeunituc = fit.bjdutcuc - event.params.tuoffset
        abscissa = fit.binbjdutc - event.params.tuoffset
        abscissauc = fit.binbjdutcuc - event.params.tuoffset
        xlabel = 'BJD_UTC - ' + str(event.params.tuoffset)
    elif event.params.timeunit == 'days-tdb':
        tuall = event.bjdtdb.flatten() - event.params.tuoffset
        timeunit = fit.bjdtdb - event.params.tuoffset
        timeunituc = fit.bjdtdbuc - event.params.tuoffset
        abscissa = fit.binbjdtdb - event.params.tuoffset
        abscissauc = fit.binbjdtdbuc - event.params.tuoffset
        xlabel = 'BJD_TDB - ' + str(event.params.tuoffset)
    elif event.params.timeunit == 'days':
        tuall = event.bjdutc.flatten() - event.params.tuoffset
        timeunit = fit.bjdutc - event.params.tuoffset
        timeunituc = fit.bjdutcuc - event.params.tuoffset
        abscissa = fit.binbjdutc - event.params.tuoffset
        abscissauc = fit.binbjdutcuc - event.params.tuoffset
        xlabel = 'BJD - ' + str(event.params.tuoffset)

    #PLOT FULL DATA WITH MEDIAN MCMC ECLIPSE
    plt.figure(fignum * numfigs + 901, figsize=(width[0], height[0]))
    plt.clf()
    a = plt.axes([0.15, 0.10, 0.8, 0.80])
    plt.plot(timeunituc, fit.fluxuc, 'k.', ms=1, label='Raw Data')
    #plt.plot(abscissa,fit.binmedianfit,'r--', label='Median Fit', lw=2)
    plt.plot(abscissa, fit.binbestfit, 'b-', label='Best Fit', lw=2)
    plt.xticks(size=14)
    plt.yticks(size=14)
    plt.xlabel(xlabel, size=14)
    plt.ylabel(r'Flux ($\mu Jy$)', size=14)
    plt.legend(loc='best')
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 901) + "-" + fit.saveext + "-full.ps",
                dpi=300)
    plt.suptitle(event.params.planetname + ' Data With Eclipse Models',
                 size=18)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 901) + "-" + fit.saveext + "-full.png",
                dpi=300)

    #PLOT BINNED AND MEDIAN MCMC ECLIPSE DATA
    plt.figure(fignum * numfigs + 902, figsize=(width[1], height[1]))
    plt.clf()
    a = plt.axes([0.15, 0.10, 0.8, 0.80])
    plt.errorbar(abscissauc,
                 fit.binfluxuc,
                 fit.binstduc,
                 fmt='ko',
                 ms=4,
                 linewidth=1,
                 label='Binned Data')
    plt.plot(abscissa, fit.binnoecl, 'k-', label='No Eclipse')
    #plt.plot(abscissa,fit.binmedianfit,'r--', label='Median Fit', lw=2)
    plt.plot(abscissa, fit.binbestfit, 'b-', label='Best Fit', lw=2)
    plt.xticks(size=14)
    plt.yticks(size=14)
    plt.xlabel(xlabel, size=14)
    plt.ylabel(r'Flux ($\mu Jy$)', size=14)
    plt.legend(loc='best')
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 902) + "-" + fit.saveext + "-bin.ps",
                dpi=300)
    plt.suptitle('Binned ' + event.params.planetname +
                 ' Data With Eclipse Models',
                 size=18)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 902) + "-" + fit.saveext + "-bin.png",
                dpi=300)
    #plt.text(min(phaseuc), max(binflux), model[0] + ', ' + model[1] + ', ' + model[2])

    #PLOT NORMALIZED BINNED AND MEDIAN MCMC ECLIPSE DATA
    plt.figure(fignum * numfigs + 903, figsize=(width[2], height[2]))
    plt.clf()
    a = plt.axes([0.15, 0.35, 0.8, 0.55])
    plt.errorbar(abscissauc,
                 fit.normbinflux,
                 fit.normbinsd,
                 fmt='ko',
                 ms=4,
                 linewidth=1,
                 label='Binned Data')
    #plt.plot(abscissa, fit.normbinmedian,'r--', label='Median Fit', lw=2)
    plt.plot(timeunit, fit.normbestfit, 'b-', label='Best Fit', lw=2)
    #plt.xticks(size=16)
    plt.setp(a.get_xticklabels(), visible=False)
    plt.yticks(size=14)
    plt.ylabel('Normalized Flux', size=14)
    plt.legend(loc='best')
    xmin, xmax = plt.xlim()
    plt.axes([0.15, 0.1, 0.8, 0.2])
    #PLOT RESIDUALS WITH BESTFIT LINE
    fit.binresfit = fit.bestlinear[0] * fit.binphase + fit.bestlinear[1]
    #plt.errorbar(fit.binphase,fit.binres,fit.binresstd,fmt='ko',ms=4,linewidth=1)
    plt.plot(abscissa, fit.binres / fit.mflux, 'ko', ms=4)
    plt.plot(abscissa, flatline, 'k-', lw=2)
    #plt.plot(fit.binphase, fit.binresfit/fit.mflux,'k-', label='Linear Fit')
    plt.xlim(xmin, xmax)
    #plt.title(event.params.planetname + ' Binned Residuals With Linear Fit')
    plt.xticks(size=14)
    plt.yticks(size=14)
    plt.xlabel(xlabel, size=14)
    plt.ylabel('Residuals', size=14)
    #plt.legend(loc='best')
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 903) + "-" + fit.saveext + "-norm.ps",
                dpi=300)
    plt.suptitle('Normalized Binned ' + event.params.planetname +
                 ' Data With Eclipse Models',
                 size=18)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 903) + "-" + fit.saveext + "-norm.png",
                dpi=300)

    #MCMC CORRELATION B/W FREE PARAMETERS
    plt.ioff()
    numfp = fit.nonfixedpars.size
    if numfp <= 7:
        k = 1
        m = 1
        plt.figure(fignum * numfigs + 904, figsize=(width[3], height[3]))
        plt.clf()
        plt.subplots_adjust(left=0.15,
                            right=0.95,
                            bottom=0.15,
                            top=0.95,
                            hspace=0.15,
                            wspace=0.15)
        for i in fit.nonfixedpars[1:numfp]:
            n = 0
            for j in fit.nonfixedpars[0:numfp - 1]:
                #plt.suptitle(event.params.planetname + ' Correlation Between Free Parameters',size=24)
                if i > j:
                    a = plt.subplot(numfp - 1, numfp - 1, k)
                    a.set_axis_bgcolor(
                        plt.cm.YlOrRd(np.abs(fit.paramcorr[m, n])))
                    if fit.parname[i].startswith('System Flux'):
                        a.yaxis.set_major_formatter(
                            plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                    if fit.parname[j].startswith('System Flux'):
                        a.xaxis.set_major_formatter(
                            plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                    if j == fit.nonfixedpars[0]:
                        plt.yticks(size=11)
                        s = fit.parname[i].replace(',', '\n')
                        plt.ylabel(s, size=12)
                    else:
                        a = plt.yticks(visible=False)
                    if i == fit.nonfixedpars[numfp - 1]:
                        plt.xticks(size=11, rotation=90)
                        s = fit.parname[j].replace(',', '\n')
                        plt.xlabel(s, size=12)
                    else:
                        a = plt.xticks(visible=False)
                    plt.plot(fit.allparams[j, 0::stepsize],
                             fit.allparams[i, 0::stepsize], 'b,')  #,ms=2)
                k += 1
                n += 1
            m += 1
        a = plt.subplot(numfp - 1, numfp - 1, numfp - 1, frameon=False)
        a.yaxis.set_visible(False)
        a.xaxis.set_visible(False)
        a = plt.imshow([[0, 1], [0, 0]], cmap=plt.cm.YlOrRd, visible=False)
        a = plt.text(1.4,
                     0.5,
                     '|Correlation Coefficients|',
                     rotation='vertical',
                     ha='center',
                     va='center')
        a = plt.colorbar()
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr.png",
                    dpi=300)
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr.ps",
                    dpi=300)
    else:
        #MCMC CORRELATION B/W FREE PARAMETERS
        #THIS VERSION SPLITS THE PARAMETERS INTO 3 FIGURES
        #SHOULD BE USED WHEN THE NUMBER OF FREE PARAMETERS IS LARGE (> 7)
        num1 = int(np.ceil((numfp - 1) / 2)) + 1
        num2 = int(np.ceil((numfp - 1) / 2.)) + 1
        #Part 1
        k = 1
        m = 1
        plt.figure(fignum * numfigs + 904, figsize=(width[3], height[3]))
        plt.clf()
        for i in fit.nonfixedpars[1:num1]:
            n = 0
            for j in fit.nonfixedpars[0:num1 - 1]:
                #plt.suptitle(event.params.planetname + ' Correlation Between Free Parameters',size=24)
                if i > j:
                    a = plt.subplot(num1 - 1, num1 - 1, k)
                    a.set_axis_bgcolor(
                        plt.cm.YlOrRd(np.abs(fit.paramcorr[m, n])))
                    if fit.parname[i].startswith('System Flux'):
                        a.yaxis.set_major_formatter(
                            plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                    if fit.parname[j].startswith('System Flux'):
                        a.xaxis.set_major_formatter(
                            plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                    if j == fit.nonfixedpars[0]:
                        plt.yticks(size=11)
                        s = fit.parname[i].replace(',', '\n')
                        plt.ylabel(s, size=10)
                    else:
                        a = plt.yticks(visible=False)
                    if i == fit.nonfixedpars[num1 - 1]:
                        plt.xticks(size=11, rotation=90)
                        s = fit.parname[j].replace(',', '\n')
                        plt.xlabel(s, size=10)
                    else:
                        a = plt.xticks(visible=False)
                    plt.plot(fit.allparams[j, 0::stepsize],
                             fit.allparams[i, 0::stepsize], 'b,')  #,ms=2)
                k += 1
                n += 1
            m += 1
        plt.subplots_adjust(left=0.15,
                            right=0.95,
                            bottom=0.15,
                            top=0.95,
                            hspace=0.15,
                            wspace=0.15)
        a = plt.subplot(num1 - 1, num1 - 1, num1 - 1, frameon=False)
        a.yaxis.set_visible(False)
        a.xaxis.set_visible(False)
        a = plt.imshow([[0, 1], [0, 0]], cmap=plt.cm.YlOrRd, visible=False)
        a = plt.text(1.4,
                     0.5,
                     '|Correlation Coefficients|',
                     rotation='vertical',
                     ha='center',
                     va='center')
        a = plt.colorbar()
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr1.png",
                    dpi=300)
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr1.ps",
                    dpi=300)
        #Part 2
        k = 1
        mprime = m
        nprime = n
        plt.figure(fignum * numfigs + 9005, figsize=(width[3], height[3]))
        plt.clf()
        for i in fit.nonfixedpars[num1:numfp]:
            n = 0
            for j in fit.nonfixedpars[0:num1 - 1]:
                #plt.suptitle(event.params.planetname + ' Correlation Between Free Parameters',size=24)
                a = plt.subplot(num2 - 1, num1 - 1, k)
                a.set_axis_bgcolor(plt.cm.YlOrRd(np.abs(fit.paramcorr[m, n])))
                if fit.parname[i].startswith('System Flux'):
                    a.yaxis.set_major_formatter(
                        plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                if fit.parname[j].startswith('System Flux'):
                    a.xaxis.set_major_formatter(
                        plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                if j == fit.nonfixedpars[0]:
                    plt.yticks(size=11)
                    s = fit.parname[i].replace(',', '\n')
                    plt.ylabel(s, size=10)
                else:
                    a = plt.yticks(visible=False)
                if i == fit.nonfixedpars[numfp - 1]:
                    plt.xticks(size=11, rotation=90)
                    s = fit.parname[j].replace(',', '\n')
                    plt.xlabel(s, size=10)
                else:
                    a = plt.xticks(visible=False)
                plt.plot(fit.allparams[j, 0::stepsize],
                         fit.allparams[i, 0::stepsize], 'b,')  #,ms=1)
                k += 1
                n += 1
            m += 1
        plt.subplots_adjust(left=0.15,
                            right=0.95,
                            bottom=0.15,
                            top=0.95,
                            hspace=0.15,
                            wspace=0.15)
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr2.png",
                    dpi=300)
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr2.ps",
                    dpi=300)
        #Part 3
        k = 1
        m = mprime
        plt.figure(fignum * numfigs + 9006, figsize=(width[3], height[3]))
        plt.clf()
        for i in fit.nonfixedpars[num1:numfp]:
            n = nprime
            for j in fit.nonfixedpars[num1 - 1:numfp - 1]:
                #plt.suptitle(event.params.planetname + ' Correlation Between Free Parameters',size=24)
                if i > j:
                    a = plt.subplot(num2 - 1, num2 - 1, k)
                    a.set_axis_bgcolor(
                        plt.cm.YlOrRd(np.abs(fit.paramcorr[m, n])))
                    if fit.parname[i].startswith('System Flux'):
                        a.yaxis.set_major_formatter(
                            plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                    if fit.parname[j].startswith('System Flux'):
                        a.xaxis.set_major_formatter(
                            plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
                    if j == fit.nonfixedpars[num1 - 1]:
                        plt.yticks(size=11)
                        s = fit.parname[i].replace(',', '\n')
                        plt.ylabel(s, size=10)
                    else:
                        a = plt.yticks(visible=False)
                    if i == fit.nonfixedpars[numfp - 1]:
                        plt.xticks(size=11, rotation=90)
                        s = fit.parname[j].replace(',', '\n')
                        plt.xlabel(s, size=10)
                    else:
                        a = plt.xticks(visible=False)
                    plt.plot(fit.allparams[j, 0::stepsize],
                             fit.allparams[i, 0::stepsize], 'b,')  #,ms=1)
                k += 1
                n += 1
            m += 1
        plt.subplots_adjust(left=0.15,
                            right=0.95,
                            bottom=0.15,
                            top=0.95,
                            hspace=0.15,
                            wspace=0.15)
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr3.png",
                    dpi=300)
        plt.savefig(event.modeldir + "/" + obj + "-fig" +
                    str(fignum * numfigs + 904) + "-" + fit.saveext +
                    "-corr3.ps",
                    dpi=300)
    plt.ion()

    # PLOT RMS vs. BIN SIZE
    plt.figure(fignum * numfigs + 905, figsize=(width[4], height[4]))
    plt.clf()
    plt.axes([0.12, 0.12, 0.82, 0.8])
    a = plt.loglog(fit.binsz, fit.rms, color='black', lw=1.5, label='RMS')
    a = plt.loglog(fit.binsz,
                   fit.stderr,
                   color='red',
                   ls='-',
                   lw=2,
                   label='Std. Err.')
    a = plt.xlim(0, fit.binsz[-1] * 2)
    a = plt.ylim(fit.rms[-1] / 2., fit.rms[0] * 2.)
    a = plt.xlabel("Bin Size", fontsize=14)
    a = plt.ylabel("RMS", fontsize=14)
    a = plt.xticks(size=14)
    a = plt.yticks(size=14)
    a = plt.legend(loc='upper right')
    a.fontsize = 8
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 905) + "-" + fit.saveext + "-rms.ps",
                dpi=300)
    a = plt.suptitle(event.params.planetname + ' Correlated Noise', size=18)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 905) + "-" + fit.saveext + "-rms.png",
                dpi=300)

    #PLOT POSITION SENSITIVITY AND MODELS
    plt.rcParams.update({'legend.fontsize': 11})
    plt.figure(906 + fignum * numfigs, figsize=(width[5], height[5]))
    plt.clf()
    yround = fit.yuc[0] - fit.y[0]
    xround = fit.xuc[0] - fit.x[0]
    a = plt.subplot(1, 2, 1)
    a = plt.errorbar(yround + fit.binyy,
                     fit.binyflux,
                     fit.binyflstd,
                     fmt='ro',
                     label='Binned Flux')
    a = plt.plot(yround + fit.binyy,
                 fit.binybestip,
                 'k-',
                 lw=2,
                 label='BLISS Map')
    a = plt.xlabel('Pixel Postion in y', size=14)
    a = plt.ylabel('Normalized Flux', size=14)
    a = plt.xticks(rotation=90)
    a = plt.legend(loc='best')
    a = plt.subplot(1, 2, 2)
    a = plt.errorbar(xround + fit.binxx,
                     fit.binxflux,
                     fit.binxflstd,
                     fmt='bo',
                     label='Binned Flux')
    a = plt.plot(xround + fit.binxx,
                 fit.binxbestip,
                 'k-',
                 lw=2,
                 label='BLISS Map')
    a = plt.xlabel('Pixel Postion in x', size=14)
    a = plt.xticks(rotation=90)
    #a = plt.ylabel('Normalized Flux', size=14)
    a = plt.legend(loc='best')
    plt.subplots_adjust(left=0.11,
                        right=0.97,
                        bottom=0.20,
                        top=0.96,
                        wspace=0.20)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 906) + "-" + fit.saveext + ".ps",
                dpi=300)
    #a = plt.suptitle('Normalized Binned Flux vs. Position', size=18)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 906) + "-" + fit.saveext + ".png",
                dpi=300)

    #HISTOGRAM
    plt.ioff()
    j = 1
    numfp = fit.ifreepars.size
    histheight = np.min((int(4 * np.ceil(numfp / 3.)), height[6]))
    if histheight == 4:
        bottom = 0.23
    elif histheight == 8:
        bottom = 0.13
    else:
        bottom = 0.12
    plt.figure(fignum * numfigs + 907, figsize=(width[6], histheight))
    plt.clf()
    for i in fit.ifreepars:
        a = plt.subplot(np.ceil(numfp / 3.), 3, j)
        if fit.parname[i].startswith('System Flux'):
            a.xaxis.set_major_formatter(
                plt.matplotlib.ticker.FormatStrFormatter('%0.0f'))
        plt.xticks(size=12, rotation=90)
        plt.yticks(size=12)
        #plt.axvline(x=fit.meanp[i,0])
        plt.xlabel(fit.parname[i], size=14)
        a = plt.hist(fit.allparams[i, 0::stepsize],
                     20,
                     label=str(fit.meanp[i, 0]))
        j += 1
    plt.subplots_adjust(left=0.07,
                        right=0.95,
                        bottom=bottom,
                        top=0.95,
                        hspace=0.40,
                        wspace=0.25)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 907) + "-" + fit.saveext + "-hist.png",
                dpi=300)
    plt.savefig(event.modeldir + "/" + obj + "-fig" +
                str(fignum * numfigs + 907) + "-" + fit.saveext + "-hist.ps",
                dpi=300)
    plt.ion()

    #FLUX VS POSITION CONTOUR PLOT OF INTERPOLATED INTRA-PIXEL
    if hasattr(fit, 'binipflux'):
        if fit.model.__contains__('nnint'):
            interp = 'nearest'
        else:
            interp = 'bilinear'
        palette = plt.matplotlib.colors.LinearSegmentedColormap(
            'jet3', plt.cm.datad['jet'], 16384)
        palette.set_under(alpha=0.0, color='w')
        binipflux = fit.binipflux
        vmin = binipflux[np.where(binipflux > 0)].min()
        vmax = binipflux.max()
        yround = fit.yuc[0] - fit.y[0]
        xround = fit.xuc[0] - fit.x[0]
        xmin = fit.xygrid[0].min() + xround
        xmax = fit.xygrid[0].max() + xround
        ymin = fit.xygrid[1].min() + yround
        ymax = fit.xygrid[1].max() + yround
        plt.figure(908 + fignum * numfigs, figsize=(width[7], height[7]))
        plt.clf()
        plt.subplots_adjust(left=0.11, right=0.95, bottom=0.10, top=0.90)
        a = plt.imshow(binipflux,
                       cmap=palette,
                       vmin=vmin,
                       vmax=vmax,
                       origin='lower',
                       extent=(xmin, xmax, ymin, ymax),
                       aspect='auto',
                       interpolation=interp)
        a = plt.colorbar(a, pad=0.05, fraction=0.1)
        a = plt.ylabel('Pixel Position in y', size=14)
        a = plt.xlabel('Pixel Position in x', size=14)
        if ymin < -0.5 + yround:
            a = plt.hlines(-0.5 + yround, xmin, xmax, 'k')
        if ymax > 0.5 + yround:
            a = plt.hlines(0.5 + yround, xmin, xmax, 'k')
        if xmin < -0.5 + xround:
            a = plt.vlines(-0.5 + xround, ymin, ymax, 'k')
        if xmax > 0.5 + xround:
            a = plt.vlines(0.5 + xround, ymin, ymax, 'k')
        plt.savefig(event.modeldir + "/" + obj + "-fig" + str(fignum*numfigs+908) + "-" + fit.saveext + \
                    "-fluxContour.ps", dpi=300)
        a = plt.suptitle('BLISS Map', size=18)
        plt.savefig(event.modeldir + "/" + obj + "-fig" + str(fignum*numfigs+908) + "-" + fit.saveext + \
                    "-fluxContour.png", dpi=300)

        #PLOT # OF POINTS/BIN VS POSITION
        lenbinflux = np.zeros(len(fit.wherebinflux))
        for m in range(len(fit.wherebinflux)):
            lenbinflux[m] = len(fit.wherebinflux[m])
        lenbinflux = lenbinflux.reshape(fit.xygrid[0].shape)
        plt.figure(909 + fignum * numfigs, figsize=(width[8], height[8]))
        plt.clf()
        plt.subplots_adjust(left=0.11, right=0.95, bottom=0.10, top=0.90)
        a = plt.imshow(lenbinflux,
                       cmap=palette,
                       vmin=1,
                       vmax=lenbinflux.max(),
                       origin='lower',
                       extent=(xmin, xmax, ymin, ymax),
                       aspect='auto',
                       interpolation=interp)
        a = plt.colorbar(a, pad=0.05, fraction=0.1)
        a = plt.ylabel('Pixel Position in y', size=14)
        a = plt.xlabel('Pixel Position in x', size=14)
        if ymin < -0.5 + yround:
            a = plt.hlines(-0.5 + yround, xmin, xmax, 'k')
        if ymax > 0.5 + yround:
            a = plt.hlines(0.5 + yround, xmin, xmax, 'k')
        if xmin < -0.5 + xround:
            a = plt.vlines(-0.5 + xround, ymin, ymax, 'k')
        if xmax > 0.5 + xround:
            a = plt.vlines(0.5 + xround, ymin, ymax, 'k')
        plt.savefig(event.modeldir + "/" + obj + "-fig" + str(fignum*numfigs+909) + "-" + fit.saveext + \
                    "-densityContour.ps", dpi=300)
        a = plt.suptitle('Pointing Histogram', size=18)
        plt.savefig(event.modeldir + "/" + obj + "-fig" + str(fignum*numfigs+909) + "-" + fit.saveext + \
                    "-densityContour.png", dpi=300)

    #CHMOD ALL POSTSCRIPT FILES
    for files in os.listdir('.'):
        if files.endswith('.ps'):
            os.chmod(files, 0664)  #0664 must be in octal

    del fit.allparams
    return
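The RMS-vs-bin-size figure above (905) is the usual time-averaging test for correlated noise: the residuals are binned at increasing bin sizes, and the RMS of the binned residuals is compared on a log-log plot against the white-noise expectation, which falls off roughly as 1/sqrt(bin size). A self-contained sketch of that test on synthetic residuals, assuming nothing about the pipeline's own fit.rms/fit.stderr arrays:

import numpy as np
import matplotlib.pyplot as plt

residuals = np.random.normal(0.0, 1.0, 2 ** 14)      # white residuals; real data may show excess

binsizes = 2 ** np.arange(0, 11)
rms = []
stderr = []
for m in binsizes:
    nbins = residuals.size // m
    binned = residuals[:nbins * m].reshape(nbins, m).mean(axis=1)
    rms.append(np.sqrt(np.mean(binned ** 2)))
    # white-noise expectation: sigma / sqrt(m), with a small finite-sample correction
    stderr.append(residuals.std() / np.sqrt(m) * np.sqrt(nbins / (nbins - 1.0)))

plt.loglog(binsizes, rms, color='black', lw=1.5, label='RMS')
plt.loglog(binsizes, stderr, color='red', lw=2, label='Std. Err.')
plt.xlabel('Bin Size')
plt.ylabel('RMS')
plt.legend(loc='upper right')
plt.show()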