Example #1
def makeplot(X,hjd,filename,ephem):

    
    if ephem == 1:
        # archive ephem
        T0 = 2452525.374416
    elif ephem == 2:
        # august ephem
        T0 = 2453964.330709
    else:
        raise ValueError('ephem must be 1 or 2')  # avoid T0 being undefined below

    P = 0.154525
    
    #X = pl.load(filename)
    a = X[:,0][:-1]
    p = X[:,1][:-1]
    x = (X[:,2][:-1]+hjd-T0)/P - int(((X[:,2][:-1]+hjd-T0)/P)[0])
    siga = X[:,3][:-1]
    sigp = X[:,4][:-1]
    
    temp = []
    temp.append(a)
    temp.append(p)
    temp.append(x)
    temp.append(siga)    
    temp.append(sigp)
    
    pl.save(filename[:-4] + 'OP' + '.dat',pl.array(temp).transpose())
Example #2
    def __call__(self,output_fn,init_time=0,final_time=None,**params):
        p=ParamOverrides(self,params)

        if final_time is None:
            final_time=topo.sim.time()

        attrs = p.attrib_names if len(p.attrib_names)>0 else output_fn.attrib_names
        for a in attrs:
            pylab.figure(figsize=(6,4))
            isint=pylab.isinteractive()
            pylab.ioff()
            pylab.grid(True)
            ylabel=p.ylabel
            pylab.ylabel(a+" "+ylabel)
            pylab.xlabel('Iteration Number')

            coords = p.units if len(p.units)>0 else output_fn.units
            for coord in coords:
                y_data=[y for (x,y) in output_fn.values[a][coord]]
                x_data=[x for (x,y) in output_fn.values[a][coord]]
                if p.raw:
                    plot_data=zip(x_data,y_data)
                    pylab.save(normalize_path(p.filename+a+'(%.2f, %.2f)' %(coord[0], coord[1])),plot_data,fmt='%.6f', delimiter=',')


                pylab.plot(x_data,y_data, label='Unit (%.2f, %.2f)' %(coord[0], coord[1]))
                (ymin,ymax)=p.ybounds
                pylab.axis(xmin=init_time,xmax=final_time,ymin=ymin,ymax=ymax)

            if isint: pylab.ion()
            pylab.legend(loc=0)
            p.title=topo.sim.name+': '+a
            p.filename_suffix=a
            self._generate_figure(p)
Example #3
 def __create_three_d_model__(self):
     """
     __create_three_d_model__: helper function for create_three_d_model
     """
     heightLevels = pl.linspace(
         0.0, abs(self.Parameters.eh - self.Parameters.sh), self.NumImages)
     # Now, simply find max variance at each pixel 'cluster' in the stack.
     self.ThreeDModel = pl.zeros(self.VoLImageStack[0].shape)
     iterationsSinceUpdate = pl.zeros(
         self.VoLImageStack[0].shape,
         dtype=pl.uint8)  # track num iterations since updated
     print "[x] Creating 3D Model of Size",
     print "(l x w x h): ", self.ThreeDModel.shape[
         0], "x", self.ThreeDModel.shape[1], "x", self.NumImages
     for index, laplacianOfImMatrix in enumerate(self.VoLImageStack):
         pointsInFocus = 0
         for row in pl.arange(laplacianOfImMatrix.shape[0]):
             for col in pl.arange(laplacianOfImMatrix.shape[1]):
                 if laplacianOfImMatrix[row][col] > self.ThreeDModel[row][
                         col] and iterationsSinceUpdate[row][
                             col] < self.Parameters.mai + 1:  # add one so that self.Parameters.mai iterations actually pass before cell is blocked
                     pointsInFocus += 1
                     self.ThreeDModel[row][col] = heightLevels[index]
                     if index > 0:
                         iterationsSinceUpdate[row][col] = 1
                 else:
                     if iterationsSinceUpdate[row][
                             col] != 0:  # make sure cell has stopped being updated BEYOND first iteration
                         iterationsSinceUpdate[row][col] += 1
         print "\t[", index + 1, "] Percentage of Pixel Clusters Updated: ", pointsInFocus, "/", laplacianOfImMatrix.size, "=",
         print float(pointsInFocus) / laplacianOfImMatrix.size
     # This time, store results using numpy since we're dealing with an array object
     with open(internalFilesDir + internalThreeDModel, 'wb') as f:
         pl.save(f, self.ThreeDModel)
Example #4
 def save(self, basename, append=""):
     pylab.save('%s%s.dat' % (basename, append), self.cartesian.data, fmt='%g')
     pylab.save('%s_x%s.dat' % (basename, append), self.cartesian.data.sum(axis=0), fmt='%g')
     pylab.save('%s_y%s.dat' % (basename, append), self.cartesian.data.sum(axis=1), fmt='%g')
     pylab.save('%s_r%s.dat' % (basename, append), self.polar.data.sum(axis=0), fmt='%g')
     pylab.save('%s_encircled%s.dat' % (basename, append), self.polar.data.sum(axis=0).cumsum(), fmt='%g')
     file('%s%s_R50.dat' % (basename, append), 'w').write('%s' % (self.rforfraction(0.5)))
Example #5
def load_default(path, closure):
    from pylab import load, save
    try:
        return load(path)
    except IOError:
        obj = closure()
        save(path, obj)  # pylab.save expects the filename first, then the array
        return obj
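
A minimal modern sketch of the same load-or-compute cache, using numpy directly, since pylab.load/pylab.save were removed from matplotlib long ago (the numpy calls are my substitution, not part of the original snippet):

import numpy as np

def load_default(path, closure):
    try:
        return np.load(path)   # np.load reads .npy binaries, unlike the old ASCII pylab.load
    except OSError:            # raised when the cache file does not exist yet
        obj = closure()
        np.save(path, obj)     # filename first, then the array
        return obj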
Example #6
    def OnBSavePointsButton(self, event):
        fn = wx.SaveFileSelector('Save point positions to file', '.txt')
        if fn is None:
            print('No file selected')
            return

        #self.points = pylab.load(fn)
        pylab.save(fn, scipy.array(self.points))
Example #7
    def OnBSavePointsButton(self, event):
        fn = wx.SaveFileSelector('Save point positions to file', '.txt')
        if fn is None:
            print('No file selected')
            return

        #self.points = pylab.load(fn)
        pylab.save(fn, scipy.array(self.points))
Example #8
def getParamCovMat(prefix,dlogpower = 2, theoconstmult = 1.,dlogfilenames = ['dlogpnldloga.dat'],volume=256.**3,startki = 0, endki = 0, veff = [0.]):
    """
    Calculates parameter covariance matrix from the power spectrum covariance matrix and derivative term
    in the prefix directory
    """
    nparams = len(dlogfilenames)

    kpnl = M.load(prefix+'pnl.dat')
    k = kpnl[startki:,0]

    nk = len(k)
    if (endki == 0):
        endki = nk
        
    pnl = M.array(kpnl[startki:,1],M.Float64)
    covarwhole = M.load(prefix+'covar.dat')
    covar = covarwhole[startki:,startki:]
    if len(veff) > 1:
        sqrt_veff = M.sqrt(veff[startki:])
    else:
        sqrt_veff = M.sqrt(volume*M.ones(nk))

    dlogs = M.reshape(M.ones(nparams*nk,M.Float64),(nparams,nk))
    paramFishMat = M.reshape(M.zeros(nparams*nparams*(endki-startki),M.Float64),(nparams,nparams,endki-startki))
    paramCovMat = paramFishMat * 0.

    # Covariance matrices of dlog's
    for param in range(nparams):
        if len(dlogfilenames[param]) > 0:
            dlogs[param,:] = M.load(prefix+dlogfilenames[param])[startki:,1]

    normcovar = M.zeros(M.shape(covar),M.Float64)
    for i in range(nk):
        normcovar[i,:] = covar[i,:]/(pnl*pnl[i])

    M.save(prefix+'normcovar.dat',normcovar)

    f = k[1]/k[0]

    if (volume == -1.):
        volume = (M.pi/k[0])**3

    #theoconst = volume * k[1]**3 * f**(-1.5)/(12.*M.pi**2) #1 not 0 since we're starting at 1
    for ki in range(1,endki-startki):
        for p1 in range(nparams):
            for p2 in range(nparams):
                paramFishMat[p1,p2,ki] = M.sum(M.sum(\
                M.inverse(normcovar[:ki+1,:ki+1]) *
                M.outerproduct(dlogs[p1,:ki+1]*sqrt_veff[:ki+1],\
                               dlogs[p2,:ki+1]*sqrt_veff[:ki+1])))
                
                
        paramCovMat[:,:,ki] = M.inverse(paramFishMat[:,:,ki])

    return k[1:],paramCovMat[:,:,1:]
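
In my reading of the nested loop above (this summary is an addition, not part of the source), it is the standard Fisher-matrix estimate built from the normalized covariance:

\tilde{C}_{ij} = \frac{C_{ij}}{P_i P_j}, \qquad
F_{ab}(k_{\max}) = \sum_{i,j \le k_{\max}} \frac{\partial \ln P_i}{\partial \theta_a}\,\sqrt{V_i}\,\bigl[\tilde{C}^{-1}\bigr]_{ij}\,\sqrt{V_j}\,\frac{\partial \ln P_j}{\partial \theta_b}, \qquad
\mathrm{Cov}(\theta) = F^{-1},

where V_i is the effective volume per bin (veff, or the constant volume argument).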
Example #9
def Signal():
    global E_imfNo
    E_imfNo = np.zeros(50, dtype=int)  # np.int was removed from modern numpy

    # EEMD options
    max_imf = 7
    """
    信号参数:
    N:采样频率500Hz
    tMin:采样开始时间
    tMax:采样结束时间 2*np.pi
    """
    N = 500
    tMin, tMax = 0, 2 * np.pi
    T = np.linspace(tMin, tMax, N)
    # Signal S: a superposition of several component signals
    S = 3 * np.sin(4 * T) + 4 * np.cos(9 * T) + np.sin(8.11 * T + 1.2)

    # EEMD computation
    eemd = EEMD()
    eemd.trials = 50
    eemd.noise_seed(12345)

    E_IMFs = eemd.eemd(S)
    imfNo = E_IMFs.shape[0]

    # Plot results in a grid
    c = int(np.floor(np.sqrt(imfNo + 1)))
    r = int(np.ceil((imfNo + 1) / c))

    plt.ioff()
    plt.subplot(r, c, 1)
    plt.plot(T, S, 'r')
    plt.xlim((tMin, tMax))
    plt.title("Original signal")

    i = 1
    for imf in E_IMFs:
        plt.subplot(len(E_IMFs), 1, i)
        plt.plot(imf)
        i += 1

    # for num in range(imfNo):
    #     plt.subplot(r, c, num + 2)
    #     plt.plot(T, E_IMFs[num], 'g')
    #     plt.xlim((tMin, tMax))
    #     plt.title("Imf " + str(num + 1))

    plt.text(0,
             0,
             str(format(i, '.4f')),
             style='italic',
             ha='center',
             wrap=True)
    plt.save("haha.jpg")
Example #10
def write_multi_par_out(check,names,par,t1,t2,filename,func,const,const_names,xbase1='none', xbase2='none', fileno='none',fileout='params.out',qval='none',file_input='input_fit.txt'):
   multi = 'on'
   if check==0:
      write_par_out(names,par,t1,t2,filename,func,const,const_names,xbase1,xbase2,fileno,fileout,multi,qval)
   else:
      f=open(fileout,'a')
      outpar=zeros((1,len(par)+1))
      outpar[0,0]=float(qval)
      outpar[0,1:]=par[:]
      Plab.save(f,outpar)
      f.close()
Example #11
def degraderesolution(prefix,factor,dlogstring):
    covar = M.load(prefix+'covar.dat')
    pnl = M.load(prefix+'pnl.dat')
    dlog = M.load(prefix+dlogstring)[:,1]
    k = pnl[:,0]*1.
    p = pnl[:,1]*1.
    gausspart = M.load(prefix+'gausspart.dat')
    nbins = len(k)

    nongausspart = covar - gausspart

    nongausspartnew = nongausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    knew = k[:nbins-factor:factor]*0.
    pnew = p[:nbins-factor:factor]*0.
    gausspartnew = gausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    nbinsnew = len(knew)
    dlognew = dlog[:nbins-factor:factor]*0.

    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        print i1,i1+factor-1,nbins
        print i1new,nbinsnew
        weights = k[i1:i1+factor-1]**3
        sumweights = M.sum(weights)
        pnew[i1new] = M.sum(p[i1:i1+factor-1]*weights)/sumweights
        knew[i1new] = M.sum(k[i1:i1+factor-1]*weights)/sumweights
        dlognew[i1new] = M.sum(dlog[i1:i1+factor-1]*weights)/sumweights

    sqrtkfact = M.sqrt(k[1]/k[0])
        
    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        for i2 in range(0,nbins-factor,factor):
            i2new = i2/factor
                                                                       
            weights2 = M.outer(k[i1:i1+factor-1]**3,k[i2:i2+factor-1]**3)
            sumweights2 = M.sum(M.sum(weights2))
            nongausspartnew[i1new,i2new] = M.sum(M.sum(nongausspart[i1:i1+factor-1,i2:i2+factor-1]*weights2))/sumweights2

            if i1new == i2new:
                vk = (4.*M.pi/3.)*((k[i1+factor-1]*sqrtkfact)**3 - (k[i1]/sqrtkfact)**3)
                gausspartnew[i1new,i2new] = (2.*M.pi)**3 * 2.*(pnew[i1new]**2)/vk
                                                                       
    covarnew = gausspartnew + nongausspartnew

    prefixnew = prefix+'degrade'+str(factor)+'/'
    os.system('mkdir '+prefixnew)
    M.save(prefixnew+'pnl.dat',M.transpose([knew,pnew]), fmt = '%18.16e')
    M.save(prefixnew+'covar.dat',covarnew, fmt = '%18.16e')
    M.save(prefixnew+'gausspart.dat',gausspartnew, fmt = '%18.16e')
    M.save(prefixnew+dlogstring,M.transpose([knew,dlognew]), fmt = '%18.16e')
    M.save(prefixnew+'nbins.dat',M.array([nbinsnew]), fmt = '%d')  # write alongside the other degraded files
Example #12
    def get_pca(self, rld_df, n_components, plot=False):
        df = rld_df.transpose()
        pca = PCA(n_components=n_components)
        X = pca.fit_transform(df)

        if plot:
            fig, ax = plt.subplots()
            ax.scatter(X[:, 0], X[:, 1])
            for i, txt in enumerate(df.index):
                ax.annotate(txt, (X[i, 0], X[i, 1]))
            plt.tight_layout()
            fn = '{uuid}.png'.format(uuid=uuid.uuid4())
            plt.savefig(fn)  # pyplot has no save(); savefig writes the figure

        cumsum = np.cumsum(pca.explained_variance_ratio_)
        return X, cumsum
Example #13
def save_ROI_spectra_perpixel(subdir, x, y, r, exp_name):
    t = read_spectral_roi(subdir, x, y, r)
    e = excitation_emission_cubes_to_spectra(t)
    savedir = "ROISpectra_outputs/" + str(exp_name) + "perpixel_" + "x" + str(
        x) + "_" + "y" + str(y) + "_" + "r" + str(r) + ".npy"

    pl.save(savedir, e)  # np.save returns None, so there is nothing meaningful to return
Example #14
def getInfoCurve():
    """
    Various functions to calculate example parameter error bars as in
    Neyrinck & Szapudi 2007, MNRAS 375, L51
    """

    c = pt.Camb(hubble = 70., ombh2 = 0.05*(0.7)**2, omch2 = 0.25*(0.7)**2)
    c.run()
    c.kextend(-10,60) # necessary to make sigma(m) integral converge well.
    pt.normalizePk(c,0.8) #sigma_8

    outputdir = 'example/'
    #Sheth-Tormen
    h = halo.HaloModel(c,st_big_a = 0., st_little_a=0.707, stq = 0.3, k = 10.**M.arange(-2,1.01,0.25),massdivsperdex=5)
    #For final calculations, should use more massdivsperdex, e.g. 20 (maybe 10 is ok)
    #also, k is really coarse, as you'll see if you run this.

    # get covariance matrix from halo-model trispectrum (saves it in the 'prefix' directory)
    # it also automatically runs halo.getHaloPknl
    halo.getHaloCov(outputdir,c,h)

    # power spectrum at h.k (range of k at which halo model quantities are evaluated)
    M.loglog(h.k,h.pnl)
    M.show()

    # get derivs wrt ln A, tilt
    h.dloga = halo.getdlogPnldCosmoParam(c,h,'scalar_amp',linlog='log')
    h.dtilt = halo.getdlogPnldCosmoParam(c,h,'scalar_spectral_index',linlog='lin')
    M.loglog(h.k,h.dloga**2,label='ln A')
    M.loglog(h.k,h.dtilt**2,label='tilt')
    M.legend()
    M.show()
    M.save(outputdir+'dlogpnldloga.dat',M.transpose([h.k,h.dloga]),fmt='%6.5e')
    M.save(outputdir+'dlogpnldtilt.dat',M.transpose([h.k,h.dtilt]),fmt='%6.5e')

    # get parameter covariance matrix (just a function of k, since there's only one variable)
    k, covmat = info.getParamCovMat(outputdir,dlogfilenames=['dlogpnldloga.dat','dlogpnldtilt.dat'])

    # plot the unmarginalized error bars in ln A and the tilt,
    # if the matter power spectrum is analyzed from k= k[0] to k.

    M.loglog(k, M.sqrt(covmat[0,0,:]),label='ln A')
    M.loglog(k, M.sqrt(covmat[1,1,:]),label='tilt')

    M.legend()
    M.show()
Example #15
def write_par_out(names,par,t1,t2,filename,func,const,const_names,xbase1='none',xbase2='none',fileno='none',fileout='params.out',multi='off',qval='none',file_input='input_fit.txt'):
   info=get_input(file_input)
   xcol=info['xcol']+' '
   ycol=info['ycol']+' '
   ynorm=info['normcol']+' '
   yerr=info['yerr']+' '

   f=open(fileout,'a')
   sep=' '
   f.write('#'+asctime()+'\n')
   f.write('#'+70*'-'+'\n')
   f.write('#fitted file= '+ filename +'\n')
   f.write('#fitting function= '+ func + '\n')
   f.write('#fitting range= ' + str(t1) + ':' + str(t2) + '\n')
   f.write('#baseline range= ' +str(xbase1)+':' +str(xbase2) + '\n')
   f.write('#xcol='+xcol+'\n')
   f.write('#ycol='+ycol+'\n')
   f.write('#ynorm='+ynorm+'\n')
   f.write('#yerr='+yerr+'\n')
   if const != []:
      f.write('#parameters kept fixed:\n')
      f.write('#'+sep.join(const_names)+'\n')
      if len(const)>1:
         f.write('#'+arraytostring(const)+'\n')
      else:
         f.write('#'+str(const)+'\n')
   if multi=='on':
      f.write('# ycol '+sep.join(names)+'\n')
      outpar=zeros((1,len(par)+1))
      outpar[0,0]=float(qval)
      outpar[0,1:]=par[:]
      Plab.save(f,outpar)
   else:
      f.write('#'+sep.join(names)+'\n')
      if len(par)>1:
         f.write(arraytostring(par)+'\n')
      else:
         f.write(str(par)+'\n')
      f.write(70*'*'+'\n')
   f.close()
Example #16
def makeplot(X,hjd,filename,ephem):

    
    if ephem == 1:
        # archive ephem
        T0 = 2452525.374416
    elif ephem == 2:
        # august ephem
        T0 = 2453964.330709
    else:
        raise ValueError('ephem must be 1 or 2')  # avoid T0 being undefined below

    P = 0.154525
    
    #X = pl.load(filename)
    x = (X[:,0]+hjd-T0)/P - int(((X[:,0]+hjd-T0)/P)[0])
    y = X[:,1]
    z = X[:,2]    
    
    temp = []
    temp.append(x)
    temp.append(y)
    temp.append(z)
    
    
    pl.save(filename[:-4] + 'OP' + '.dat',pl.array(temp).transpose())
Example #17
from scipy import arange, sin, pi, randn, zeros
import pylab as p

a = 2       # 2 volt amplitude
f = 10      # 10 Hz frequency
sigma = 0.5 # 0.5 volt standard deviation noise

# create the t and v and store them a 2D array X 
t = arange(0.0, 2.0, 0.02)                # an evenly sampled time array
v = a*sin(2*f*pi*t) + sigma*randn(len(t)) # a noisy sine wave
X = zeros((len(t),2))                     # an empty output array
X[:,0] = t                                # add t to the first column 
X[:,1] = v                                # add s to the 2nd column
p.save('data/noisy_sine.dat', X)            # save the output file as ASCII

# plot the arrays t vs v and label the x-axis, y-axis and title
# save the output figure as noisy_sine.png
p.plot(t, v, 'b-')
p.xlabel('time (s)')
p.ylabel('volts (V)')
p.title('A noisy sine wave')
p.grid()
p.savefig('noisy_sine.png', dpi=150)
p.savefig('noisy_sine.eps')
p.show()
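
For comparison, a minimal sketch of the same script against current APIs, since pylab.save was removed and np.savetxt is its closest ASCII replacement (this rewrite is an illustration, not part of the original example):

import numpy as np
import matplotlib.pyplot as plt

a, f, sigma = 2, 10, 0.5                       # amplitude (V), frequency (Hz), noise std (V)
t = np.arange(0.0, 2.0, 0.02)                  # an evenly sampled time array
v = a * np.sin(2 * f * np.pi * t) + sigma * np.random.randn(len(t))
np.savetxt('data/noisy_sine.dat', np.column_stack((t, v)))  # ASCII output, as pylab.save once did

plt.plot(t, v, 'b-')
plt.xlabel('time (s)')
plt.ylabel('volts (V)')
plt.title('A noisy sine wave')
plt.grid(True)
plt.savefig('noisy_sine.png', dpi=150)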
Example #18
        raise ValueError, "smooth only accepts 1 dimension arrays."

    if x.size < window_len:
        raise ValueError, "Input vector needs to be bigger than window size."


    if window_len<3:
        return x

    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"

    s=numpy.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
    #print(len(s))
    if window == 'flat': #moving average
        w=ones(window_len,'d')
    else:
        w=eval('numpy.'+window+'(window_len)')

    y=numpy.convolve(w/w.sum(),s,mode='same')
    return y[window_len-1:-window_len+1]

if __name__=='__main__':
    data=pylab.load("spibgorig.dat")
    print data
    data.transpose()
    bg=pylab.array([data[0],smooth(data[1], 200)])
    bg.transpose()
    print bg
    pylab.save("spibgsmoothed.dat", bg)
Example #19
# kill the low-frequency components (periods longer than ~1 hour)
k = int(24.0*l*dt)
print k

fft[0:5] = 0.0
fft[-5:] = 0.0

yy = sci.ifft(fft)


pl.subplot(311)
pl.plot(x,y,'.')

pl.subplot(312)
pl.plot(x,yy,'-')

pl.subplot(313)
pl.plot(x,y-yy,'.')

pl.show()

temp = []
temp.append(x)
temp.append(yy)
temp.append(y-yy)

pl.save('HaFF.dat',pl.array(temp).transpose())


Example #20
red = pl.load('Ha_red.dat')[:,0]
#X = pl.load('Ha_deblend.dat')

#red_vel = X[:,2]
#blue_vel = X[:,1]
#phase = X[:,0]


blue_vel = (blue - 6563.0) / 6563.0 * c
red_vel = (red - 6563.0) / 6563.0 * c  
pl.figure(figsize=(6,4))
pl.plot(phase,blue_vel,'bo-')
pl.plot(phase,red_vel,'ro-')
pl.ylim(-750,750)
pl.ylabel('Line Velocity (km/s)')
pl.xlabel('Orbital Phase')
pl.show()


temp = []

temp.append(phase)
temp.append(blue_vel)
temp.append(red_vel)


pl.save('Ha_deblend.dat', pl.array(temp).transpose())



Example #21
_ip.magic("run dirichlet_sparse_stats.py")
plot_results(r, case="0.1")
_ip.magic("run dirichlet_sparse_stats.py")
plot_results(r, case="0.1")
_ip.magic("run dirichlet_sparse_stats.py")
plot_results(r, case="0.1")
p.show()
p.figure()
#[Out]# <matplotlib.figure.Figure object at 0x118a80e90>
_ip.magic("run dirichlet_sparse_stats.py")
plot_results(r, case="0.1")
#?p.save
#?p.savefig
_ip.magic("pwd ")
#[Out]# '/Users/ivan/Homes/master/Documents/Projects/LatentDirichletAllocation/math'
p.save("alpha0.1sparseness.pdf")
p.savefig(fname="alpha0.1sparseness.pdf")
#?p.savefig
p.savefig("alpha0.1sparseness.pdf")
r = get_sparse_for_alpha(alphaval=0.01)
plot_results(r, case="0.01")
p.savefig("Dir_sparseness_a0dot01.pdf")
r01 = get_sparse_for_alpha(alphaval=0.1)
plot_results(r, case="0.1")
#p.savefig("Dir_sparseness_a0dot1.pdf")
plot_results(r01, case="0.1")
p.savefig("Dir_sparseness_a0dot1.pdf")
x = np.zeros(n)
#x = np.zeros(n)
n=4
x = np.zeros(n)
Example #22
import numpy as np
import pylab as pl

def test_save_using_pylab():
    z = np.zeros([3, 5])
    path = "/tmp/test.pl"
    pl.save(path, z)
Example #23
def multifit(lastcol,outdir,outpref,inputfile='input_fit.txt',doplot='yes'):
   ndataset=int(lastcol)
   if os.path.exists(inputfile) is False:
      reply=raw_input('I need an input file, do you want me to create it?(Y/n)')
      print reply
      if reply=='n':
         exit()
      else:
         print 'creating an input file'
         inputfile= create_input(inputfile)
         f=open(inputfile,'r')
         text=f.read()
         print text
         test = raw_input('Is it ok?(Y/n)')
         if test == 'n':
           print 'Please correct it and launch the program again'
           exit()
#reading input parameters####################################################
   Input_par=get_input(inputfile)
   pars = process_input(Input_par)
   filename=pars[0]
   t1=float(pars[1])
   t2=float(pars[2])
   range_par=pars[3]
   func=pars[4]
   name_const=pars[5]
   const_0=pars[6]
   name_par=pars[7]
   par_0=pars[8]
   xcol=pars[9]
   ycol=pars[10]
   normcol=pars[11]
   #yerr=pars[12]
#input parameters read####################################################
   g=open(filename,'r')
   title=g.readline()
   g.close()
   qs=title.split(' ')
   if doplot=='yes':
      Plab.ion()
      Plab.figure(1)
      axdata=Plab.axes()
      axdata.set_xlabel('t [sec]')
      axdata.set_ylabel('g^2(q,t)')
      ldata1,=axdata.semilogx((1,),(1,),'ko',label='all range')
      axdata.hold(True)
      ldata2,=axdata.semilogx((1,),(1,),'r^',label='fitting range')
      ldata3,=axdata.semilogx((1,),(1,),'k-',label='fit')
      axdata.legend(loc='lower left')
      Plab.draw()
      npars=len(name_par)
      parplot=zeros((ndataset-ycol,npars),dtype=float32)
      n=tuple(range(1,npars+1))
      Plab.figure(2)
      ncols=(1+npars)/2
      nrows=npars/ncols+npars%ncols
      ax=range(npars)
      lpars=range(npars)
      for k in range(npars):
          s=str(ncols)+str(nrows)+str(k+1)
          ax[k]=Plab.subplot(s)
          ax[k].set_xlabel('q [1/Ang]')
          ax[k].set_ylabel(name_par[k])
          ax[k].set_title(name_par[k])
          lpars[k],=ax[k].plot((1,),(1,),'ro-')
      Plab.draw()
      raw_input('Adjust positioning and size of figures, then press RETURN...')
   
   for i in range(ycol,ndataset):
     check=i-ycol
     print 'reading file ', filename, ' ...', 'col no. ', i+1
     x,y,not_used=read_data(filename,xcol,i,normcol)
     print 'read'
     if range_par=='in':
        print 'including data in the range ', t1, ':', t2
     if range_par=='out':
        print 'excluding data in the range ', t1, ':', t2
     if range_par=='all':
        t1=Plab.min(x)
        t2=Plab.max(x)
        print 'using all x-range to fit:[',t1,'-',t2,']'
     xfit,yfit,not_used=fit_region(x,y,t1,t2,range_par)
     y_out,p,f= fitting(xfit,yfit,func,par_0,const_0)
#writing results#################################################
     file_par_out=os.path.join(outdir,outpref+'_fitpar.dat')
     file_data_out=os.path.join(outdir,outpref+'_fitcol'+str(i+1)+'.dat')
     write_multi_par_out(check,name_par,p,t1,t2,filename,func,const_0,name_const,fileout=file_par_out,qval=qs[i+1],file_input=inputfile)
     ytheo=eval(f)
     mat=zeros((len(x),3),dtype=float32)
     mat[:,0]=x
     mat[:,1]=y
     mat[:,2]=ytheo
     fi=open(file_data_out,'w')
     fi.write('# x y fit')
     Plab.save(fi,mat)
     fi.close()
     print 'output written on files ', file_par_out,' and ', file_data_out
#some plots#####################################################
     if doplot=='yes':
       Plab.figure(1)
       ldata1.set_data(x,y)
       ldata2.set_data(xfit,yfit)
       ldata3.set_data(x,ytheo)
       axdata.set_xlim(Plab.min(x),Plab.max(x))
       axdata.set_ylim(Plab.min(y),Plab.max(y))
       Plab.draw()
       q=qs[2:i+2]
       parplot[i-1,:]=p
       q=Plab.transpose(q)
       Plab.figure(2)
       for k in range(npars):
         lpars[k].set_data(q,parplot[:i,k])
         xmin=float(q[0])
         xmax=float(q[-1])
         ax[k].set_xlim(0.9*xmin,1.1*xmax)
         ax[k].set_ylim(0.9*Plab.min(parplot[:i,k]),1.1*Plab.max(parplot[:i,k]))
       Plab.draw()
   if doplot=='yes':
      raw_input( "Press a RETURN to close figures...")
      Plab.close('all')
Example #24
    pl.xlim(0, pl.xlim()[1])
    pl.ylim(pl.ylim()[0], 0)

    coords = (coords[(coords[:, 0] > 5) * \
                     (coords[:, 0] < stack.shape[0] - 5) *\
                     (coords[:, 1] > 5) * \
                     (coords[:, 1] < stack.shape[1] - 5)])

    print("Found %d good windows away from the edges"%len(coords))

    for c in coords:
        pl.plot(c[1], c[0], 'wo', alpha = 0.3)
    pl.title("gaussian std %d, threshold %.1f, labels %d"%(GS,
                                                           thr,
                                            np.sum(goodwindows)))
    pl.savefig(args[0].replace(".npy", "_selectedwindows.pdf"))
    if options.showme: pl.show()
    pl.close()
    # coords = np.array([coords[:, 1], coords[:, 0]])
    pl.save(args[0].replace(".npy", "_coords.npy"), coords[:, 1::-1])
    pl.save(args[0].replace(".npy", "_mask.npy"), mask)
    # print (mask)

    # for i in range(labels.max()+1):
    #    if i in labels:
    #        pl.figure()
    #        pl.imshow(mask[:i].sum(axis=0),  interpolation='nearest',
    #               cmap='gist_gray')
    #        pl.savefig("mask.%04d.png"%i)
    #        pl.close('all')
Example #25
	solution = si.odeint(coupling_optim, y0, \
		   t,printmessg=1,rtol=1e-10,atol=1e-10)

elif (linear!=1):

	solution = si.odeint(coupling_optim_garrick, y0, \
		   t,printmessg=1,rtol=1e-10,atol=1e-10)



total_monomers=total_concentration(solution, box_vol)
#now save the total number of monomers and the time in two separate files

if (linear==1): 

	p.save("number_monomers_linear_binning.dat", total_monomers)

elif (linear !=1):

	p.save("number_monomers_nonlinear_binning.dat", total_monomers)


p.save("time.dat", t)


#check the quality of the simulation by testing mass conservation

mass_tests=total_mass_conservation(solution, vol_grid,box_vol)

print "initial and final total mass in the box are", mass_tests[0], mass_tests[1] ,"respectively"
Example #26
def getHaloCov(prefix,c,h):
    """
    Output halo model covariance matrix, correlation matrix into the directory 'prefix'
    """
    os.system('mkdir '+prefix)

    h.pnl = getHaloPknl(c,h)

    M.save(prefix+'pnl.dat',M.transpose([h.k,h.pnl]), fmt = '%18.16e')

    h.prefix = prefix
    #h.dlogPnldlogA = getdlogPnldlogA(c,h)
    #M.save(prefix+'dlogpnldloga.dat',M.transpose([h.k,h.dlogPnldlogA]),fmt='%6.5e')
    vk = h.k*0.
    vk[0] = (h.k[0]*h.k[1])**1.5 - (h.k[0]**3/h.k[1])**1.5
    for k1 in M.arange(1,len(h.k)-1):
        vk[k1] = (h.k[k1]*h.k[k1+1])**1.5 - (h.k[k1]*h.k[k1-1])**1.5
    vk[-1] = (h.k[-1]**3/h.k[-2])**1.5 - (h.k[-1]*h.k[-2])**1.5
    vk *= 4.*M.pi/3.

    gausspart = M.outer(h.k*0.,h.k*0.)
    for k1 in M.arange(len(h.k)):
        gausspart[k1,k1] = (2.*M.pi)**3 * 2.*(h.pnl[k1]**2)/vk[k1]

    if h.p.outputalltterms == 0:
        t = getHaloTrispec(c,h, adder=gausspart)
    elif h.p.outputalltterms == 1:
        t,t10,t1h,t2h,t3h,t4h = getHaloTrispec(c,h, adder=gausspart)
    elif h.p.outputalltterms == 2:
        t,t10,t1h,t2h22,t2h31,t3hB,t3hnoB,t4hT,t4hnoT = getHaloTrispec(c,h,adder=gausspart)
    
    covar = t*1.

    cocg = h.k*0.
    for k1 in M.arange(len(h.k)):
        cocg[k1] = M.sqrt(covar[k1,k1]/gausspart[k1,k1])

    covar += gausspart
    M.save(prefix+'covar.dat',covar, fmt = '%18.16e')
    M.save(prefix+'gausspart.dat',gausspart, fmt = '%18.16e')
    #t10->pt.dat is the perturbation theory trispectrum by itself.  However,
    #it might not be calculated at high precision in the nonlinear
    #regime: where other terms dominate on small scales, the
    #first pass at calculating it is done with low precision.

    #
    if h.p.outputalltterms == 1:
        M.save(prefix+'pt.dat',t10, fmt = '%18.16e')
        M.save(prefix+'t1h.dat',t1h, fmt = '%18.16e')
        M.save(prefix+'t2h.dat',t2h, fmt = '%18.16e')
        M.save(prefix+'t3h.dat',t3h, fmt = '%18.16e')
        M.save(prefix+'t4h.dat',t4h, fmt = '%18.16e')
    if h.p.outputalltterms == 2:
        M.save(prefix+'pt.dat',t10, fmt = '%18.16e')
        M.save(prefix+'t1h.dat',t1h, fmt = '%18.16e')
        M.save(prefix+'t2h22.dat',t2h22, fmt = '%18.16e')
        M.save(prefix+'t2h31.dat',t2h31, fmt = '%18.16e')
        M.save(prefix+'t3hB.dat',t3hB, fmt = '%18.16e')
        M.save(prefix+'t3hnoB.dat',t3hnoB, fmt = '%18.16e')
        M.save(prefix+'t4hT.dat',t4hT, fmt = '%18.16e')
        M.save(prefix+'t4hnoT.dat',t4hnoT, fmt = '%18.16e')
        
    correl = 0.*covar

    tnorm = t
    for i in M.arange(len(h.k)):
        for j in M.arange(len(h.k)):
            correl[i,j] = covar[i,j]/M.sqrt(covar[i,i]*covar[j,j])

    M.save(prefix+'nbins.dat',M.array([len(h.k)]), fmt = '%d')
    M.save(prefix+'correl.dat',correl, fmt = '%4.3f')
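
Schematically, the shell volumes and the Gaussian diagonal computed above are (my paraphrase of the code, not a formula quoted from the source):

V_i = \frac{4\pi}{3}\left[(k_i k_{i+1})^{3/2} - (k_{i-1} k_i)^{3/2}\right], \qquad
C^{\mathrm{G}}_{ii} = \frac{2\,(2\pi)^3\,P_{\mathrm{nl}}(k_i)^2}{V_i},

i.e. the bin edges are taken at the geometric means of neighbouring k values.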
Example #27
import numpy as np
import pylab as pl

def test_save_using_pylab():
    z = np.zeros([3, 5])
    path = "/tmp/test.pl"
    pl.save(path, z)
Example #28
def generate_data(a_b, xl, type):
    y = []
    execution_time = []

    particle = Spheroid(m, a_b, type)
    particle.set_xl(xl)

    id = str(a_b) + " " + str(xl) + " " + str(type)

    for i in range(n_min, n_max, 2):
        nmax = i
        print nmax
        start = time.time()
        svm = SpheroidalSVM(particle, nmax)
        b_sca = svm.getSolution(TMInputWave(alpha))[0]
        C_ext = getCext(particle, alpha, b_sca, nmax)[0]
        C_sca = getCsca(particle, b_sca, nmax)[0]
        delta = (C_ext - C_sca) / (C_ext + C_sca)
        execution_time.append(time.time() - start)
        y.append(delta)

    y = np.fabs(y)
    save("svm_delta" + id, y)
    save("svm_time" + id, execution_time)
    y = []
    execution_time = []

    for i in range(n_min, n_max, 2):
        nmax = i
        print nmax
        start = time.time()
        ebcm = SpheroidalEBCM(particle, nmax)
        b_sca = ebcm.getMatrixSolution(TMInputWave(alpha))[0]
        C_ext = getCext(particle, alpha, b_sca, nmax)[0]
        C_sca = getCsca(particle, b_sca, nmax)[0]
        delta = (C_ext - C_sca) / (C_ext + C_sca)
        execution_time.append(time.time() - start)
        y.append(delta)

    y = np.fabs(y)
    save("ebcm_m_delta" + id, y)
    save("ebcm_m_time" + id, execution_time)
    y = []
    execution_time = []

    for i in range(n_min, n_max, 2):
        nmax = i
        print nmax
        start = time.time()
        ebcm = SpheroidalEBCM(particle, nmax)
        b_sca = ebcm.getSolution(TMInputWave(alpha))[0]
        C_ext = getCext(particle, alpha, b_sca, nmax)[0]
        C_sca = getCsca(particle, b_sca, nmax)[0]
        delta = (C_ext - C_sca) / (C_ext + C_sca)
        execution_time.append(time.time() - start)
        y.append(delta)

    y = np.fabs(y)
    save("ebcm_delta" + id, y)
    save("ebcm_time" + id, execution_time)
    y = []
    execution_time = []
Example #29
            y.append(l)
            i += 1
            if (max_num > 0 and i >= max_num):
                break
    return np.array(x), np.array(y)


if (reload):
    # Train set
    x_train, y_train = read_data(train_image_paths, train_labeled_studies,
                                 n_train, bone_name)
    # Test set
    x_test, y_test = read_data(valid_image_paths, valid_labeled_studies,
                               n_test, bone_name)

    save(data_folder + "x_train_" + bone_name, x_train)
    save(data_folder + "y_train_" + bone_name, y_train)
    save(data_folder + "x_test_" + bone_name, x_test)
    save(data_folder + "y_test_" + bone_name, y_test)
else:
    # Load saved data
    x_train = load(data_folder + "x_train_" + bone_name + ".npy")
    y_train = load(data_folder + "y_train_" + bone_name + ".npy")
    x_test = load(data_folder + "x_test_" + bone_name + ".npy")
    y_test = load(data_folder + "y_test_" + bone_name + ".npy")

### Build the model:

import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
Example #30
from boutdata.collect import collect
from boututils.moment_xyzt import moment_xyzt
from boututils.file_import import file_import
from pylab import save, figure, plot, title, xlabel, ylabel, show, tight_layout
from elm_size import elm_size

path='./data'

t_array=collect('t_array', path=path)
save('t_array.dat', t_array)
p0=collect('P0', path=path)
save('p0.dat', p0)


# n0=collect('n0', path=path)
# save('n0.dat', n0)
# ti0=collect('ti0', path=path)
# save('ti0.dat', ti0)
# te0=collect('te0', path=path)
# save('te0.idl.dat', te0)

gfile=file_import('./cbm18_dens8.grid_nx68ny64.nc')

p=collect('P', path=path)
save('p.dat', p)
res=moment_xyzt(p,'RMS','DC')
rmsp=res.rms
dcp=res.dc
save('rmsp.dat', rmsp)
save('dcp.dat',  dcp)
elmsp=elm_size(dcp,p0,gfile,yind=32,Bbar=gfile['bmag'])
Example #31
data_ecog_clean = pl.load(os.path.join(memap_folder,'data_ecog_clean.npy'), mmap_mode='r+')
data_ecog_mua_shaftC = pl.load(os.path.join(memap_folder,'data_ecog_mua_shaftC.npy'), mmap_mode='r+')

data_ecog_fft_norm_shaftA = pl.load(os.path.join(memap_folder, 'data_ecog_fft_norm_shaftA.npy'), mmap_mode=None)
data_ecog_fft_norm_shaftC = pl.load(os.path.join(memap_folder, 'data_ecog_fft_norm_shaftC.npy'), mmap_mode=None)

phases_all_shaftA = pl.load(os.path.join(memap_folder, 'phases_all_shaftA.npy'), mmap_mode=None)
phases_all_shaftC = pl.load(os.path.join(memap_folder, 'phases_all_shaftC.npy'), mmap_mode=None)

data = pl.load(os.path.join(memap_folder,'B14R9_raw.npy'), mmap_mode='r+')


# ----------Data generation-----------------
data = lio.read_all_csc(folder,  assume_same_fs=False, memmap=True, memmap_folder=memap_folder, save_for_spikedetekt=False, channels_to_save=None, return_sliced_data=False)
pl.save(os.path.join(memap_folder, 'B14R9_raw.npy'), data)

data_ecog = data[:64,:]
data_probe = data[64:,:]


data_probe_hp = pl.memmap(os.path.join(memap_folder,'data_probe_hp.dat'), dtype='int16', mode='w+', shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i,:] = filters.high_pass_filter(data_probe[i, :], Fsampling=f_sampling, Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)


shape_data_ss = (pl.shape(data_ecog)[0], pl.shape(data_ecog)[1]/int(f_sampling/f_subsample))
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'), dtype='int16', mode='w+', shape=shape_data_ss)
Example #32
    #pl.subplot(212)
    #pl.plot(times-int(times[0]),kms,'.')
    #pl.xlabel('Time (HJD)')
    #pl.ylabel('Line Velocity (km/s)')
    #pl.ylim(-250,250)
    #pl.show()
    
    
    temp = pl.zeros((len(times),4),dtype='float')
    for i in range(len(times)):
        temp[i,0] = times[i]
        temp[i,1] = kms[i]
        temp[i,2] = fwhm[i]
        temp[i,3] = flux[i]
    
    pl.save('%s_ew.dat'%line,temp)
    
    results = []
    times= []
    kms = []
    fwhm = []
    flux = []



# plot the velocities of the lines and the photometry lightcurve

Example #33
def lonepoly(X,
             y,
             Q,
             m,
             nbIter,
             method,
             lamb=0,
             r=1,
             nbRestart=1,
             frein=True,
             drawObj=False,
             drawPartial=False,
             thbet=10,
             thgam=3):
    '''
    Solves the sparse ordered polytomous model for a fixed lambda (or r).
    5 required arguments, 5 optional method arguments, 2 drawing arguments, 2 now unused (kept just in case).
    Note: "restarts" can also be applied if necessary (hence the two nested for loops over l1 and l2).
    X : the patients * genes matrix.
    y : the vector of deteriorations.
    n : number of patients.
    p : number of genes.
    Q : the number of deterioration states.
    lamb : the penalty.
    L : the number of iterations, fixed in advance (denoted N in the paper).
    thbet and thgam : bounds that the values of beta and gamma must not cross (they could ultimately be set to +-inf).
    Support = vector of size p: 0 for non-influential genes, 1 for influential genes.
    supp = vector of size at most p holding the indices of the influential genes.
    possible methods = Nesterov (N), GradianSimple (GS), Mixte (M), Frank-Wolfe (FW), Online Frank-Wolfe (OFW)
    '''
    # Conditions initiales
    n = X.shape[0]
    p = X.shape[1]
    bet0 = np.zeros((p, 1))
    gam0 = np.vstack(range(Q - 1)) - Q / 2. + 1
    bet = bet0
    gam = gam0
    thet0 = np.vstack([bet, gam])  # Serge: vstack simply merges the data vertically.
    thet = np.vstack([bet, gam])
    mu = 0.005
    h = 0  # indicator of the quality of the evaluation for a given r (OFW).
    if (method == 'OFW'):
        X_l = np.zeros((0, p))
        y_l = np.zeros((0, 1))
    if drawObj:
        GraphlossFunction = []  # used to plot how the objective function evolves across iterations.
        lossVal = lossFunction(n, p, X, bet, gam, y, lamb, mu)  # do not rebind the lossFunction name
        GraphlossFunction.append(lossVal)

    # Optimisation de la fonction objective.
    for l1 in range(nbRestart):
        if (method == 'N' or method == 'M'):
            G = np.zeros((p + Q - 1, 0))  # G is a matrix with p+Q-1 rows and 0 columns.
        for l2 in range(nbIter):
            if (method == 'GS'):
                res = IterationGradianSimple(X, y, thet, bet, gam, mu, p, Q, n,
                                             m, lamb, frein)
            if (method == 'N'):
                res = Iteration_Nesterov(X, y, thet, bet, gam, thet0, mu, l2,
                                         p, Q, n, m, lamb, G, frein)
                G = res[3]
            if (method == 'M' and l2 < nbIter / 5):
                res = IterationGradianSimple(X, y, thet, bet, gam, mu, p, Q, n,
                                             m, lamb, frein)
            if (method == 'M' and l2 >= nbIter / 5):
                res = Iteration_Nesterov(X, y, thet, bet, gam, thet0, mu,
                                         l2 - nbIter / 5, p, Q, n, m, lamb, G,
                                         frein)
                G = res[3]
            if (method == 'FW'):
                res = IterationFW(X, y, thet, bet, gam, p, Q, n, m, r, l2)
            if (method == 'OFW'):
                h = h + like(n, p, X, bet, gam, y)
                if (l2 % n == 0):
                    np.random.seed(2111983 + l2 / n)
                    permu = np.random.permutation(range(n))
                index = permu[l2 % n]
                w_l_X = np.matrix(X.take(index, axis=0))
                w_l_y = np.matrix(y.take(index, axis=0))
                X_l = np.concatenate((X_l, w_l_X), axis=0)
                y_l = np.concatenate((y_l, w_l_y), axis=0)
                res = IterationOFW(X_l, y_l, thet, bet, gam, p, Q, l2 + 1, m,
                                   r, l2)
            if (method == 'QOFW'):
                if (l2 % n == 0):
                    np.random.seed(2111983 + l2 / n)
                    permu = np.random.permutation(range(n))
                index = permu[l2 % n]
                X_l = X.take(index, axis=0)
                y_l = y.take(index, axis=0)
                res = IterationOFW(X_l, y_l, thet, bet, gam, p, Q, 1, m, r, l2)
            thet = res[0]
            bet = res[1]
            gam = res[2]

            if drawObj:
                lossVal = lossFunction(n, p, X, bet, gam, y, lamb, mu)
                GraphlossFunction.append(lossVal)
            if drawPartial:
                print "bet = ", bet
                print "gradbet(X,y,n,p,bet,gam) = ", gradbet(
                    X, y, n, p, bet, gam, 0)
                for i in range(len(gam)):
                    draw1DGam(n, p, X, bet, gam, m, y, lamb, mu, i, 100)
                    print "i = ", i, "gam = ", gam
                for i in range(len(bet)):
                    draw1DBet(n, p, X, bet, gam, y, lamb, mu, i, 100)

    #print "Like = ", like(n,p,X,bet,gam,y) , "l = ", l2

    # End of the optimization algorithm. Formatting the outputs.
    Support = (abs(bet) > .01)
    if (
            lamb == 0 and (method == 'N' or method == 'GS' or method == 'M')
    ):  # If lambda = 0, we keep the whole support (but why was that, again?)
        Support = (abs(bet) > -1)
    supp = np.zeros((np.sum(Support)))
    SizeS = size(supp, axis=0)  # SizeS is the new value of p.
    if SizeS > 70:
        worked = 0  # This means there was an error, I suppose.
    else:
        worked = 1
    save('bet', bet)
    jj = 0
    for j in range(p):
        if Support[j]:
            supp[jj] = j
            jj = jj + 1

        if math.isnan(bet[j, 0]):
            if j == 0:
                print "erreur, le ", j, "eme coefficient de beta est ", bet[j,
                                                                            0]
            worked = 0

    if SizeS > 0:
        bet = np.transpose(np.asmatrix(bet.take([supp])))
    if SizeS == 0:
        bet = []
    if drawObj:
        GraphlossFunction = np.vstack(
            GraphlossFunction
        )  # This block plots the curve of the objective function.
        plt.plot(GraphlossFunction)
        if frein:
            chaine1 = 'avec recherche lineaire, '
            chaine2 = '_avec_recherche_lineaire'
        if (not frein):
            chaine1 = ''
            chaine2 = ''
        if (method == 'Nesterov'):
            plt.title('function Objective, Nesterov ' + chaine1 + 'lambda = ' +
                      str(lamb))
            plt.savefig('function_Objective,_Nesterov' + chaine2 +
                        '_lambda_=_' + str(lamb))
        if (method == 'GradianSimple'):
            plt.title('function Objective, gradian simple ' + chaine1)
            plt.savefig('function_Objective,_gradian_simple' + chaine2)
        if (method == 'FW'):
            plt.title('function Objective, FW ')
            plt.savefig('function_Objective, FW')
        if (method == 'OFW'):
            plt.title('function Objective, Online FW ')
            plt.savefig('function_Objective, Online FW')
        plt.show()

    return [bet, gam, supp, worked, h
            ]  # To do: add FinalLogLike or the final state of the objective function.
Example #34
import numpy as np
from boutdata import collect
from boututils import moment_xyzt, file_import
from pylab import save, figure, plot, title, xlabel, ylabel, show, tight_layout
from elm_size import elm_size

path = './data'

t_array = collect('t_array', path=path)
save('t_array.dat', t_array)
p0 = collect('P0', path=path)
save('p0.dat', p0)

# n0=collect('n0', path=path)
# save('n0.dat', n0)
# ti0=collect('ti0', path=path)
# save('ti0.dat', ti0)
# te0=collect('te0', path=path)
# save('te0.idl.dat', te0)

gfile = file_import('./cbm18_dens8.grid_nx68ny64.nc')

p = collect('P', path=path)
save('p.dat', p)
res = moment_xyzt(p, 'RMS', 'DC')
rmsp = res.rms
dcp = res.dc
save('rmsp.dat', rmsp)
save('dcp.dat', dcp)
elmsp = elm_size(dcp, p0, gfile, yind=32, Bbar=gfile['bmag'])
save('elmsp.dat', elmsp)
Example #35
from scipy.ndimage.filters import maximum_filter, gaussian_filter,rank_filter, median_filter
from scipy.ndimage.morphology import grey_opening,grey_closing, grey_erosion
from scipy.ndimage.measurements import label,center_of_mass,maximum_position

# ===================== MANIPULATING THE IMAGE ==================
# renormalising the input.
# If needed, a nonlinear transformation can be performed by changing the exponent variable.
# Larger exponents suppress the low-intensity region of the spectrum.
print "====> Starting the manipulations...\n"

if (exponent!= 1):
    print "- Nonlinear Stretching..."
    # Data=((Data/float(Data.max()/theta) )**exponent*255/phi).astype(int)
    Data=((Data/float(Data.max()/theta) )**exponent*255/phi).astype(int)
    if(save):
        pl.save("Stretching", Data)

if(opening):
    print "- Morphological Opening..."
    Data=grey_opening(Data, structure=Cross)
    if(save):
        pl.save("Opening", Data)

if(erosion):
    print "- Morphological Erosion..."
    Data=grey_erosion(Data, structure=Cross)
    if(save):
        pl.save("Erosion", Data)
if(closing):
    print "- Morphological Closing..."
    Data=grey_closing(Data, structure=Cross)
Example #36
from scipy import arange, sin, pi, randn, zeros
import pylab as p

a = 2  # 2 volt amplitude
f = 10  # 10 Hz frequency
sigma = 0.5  # 0.5 volt standard deviation noise

# create the t and v and store them a 2D array X
t = arange(0.0, 2.0, 0.02)  # an evenly sampled time array
v = a * sin(2 * f * pi * t) + sigma * randn(len(t))  # a noisy sine wave
X = zeros((len(t), 2))  # an empty output array
X[:, 0] = t  # add t to the first column
X[:, 1] = v  # add s to the 2nd column
p.save('data/noisy_sine.dat', X)  # save the output file as ASCII

# plot the arrays t vs v and label the x-axis, y-axis and title
# save the output figure as noisy_sine.png
p.plot(t, v, 'b-')
p.xlabel('time (s)')
p.ylabel('volts (V)')
p.title('A noisy sine wave')
p.grid()
p.savefig('noisy_sine.png', dpi=150)
p.savefig('noisy_sine.eps')
p.show()
Example #37
ecog_bad_channels = [
    14, 30, 31, 34, 35, 36, 37, 38, 39, 40, 41, 45, 46, 48, 49, 53, 55, 57, 60,
    61, 62, 63
]
probe_bad_channels = [13, 14, 15, 16, 22, 25, 29, 30]

#----------Data generation-----------------
data = lio.read_all_csc(folder,
                        assume_same_fs=False,
                        memmap=True,
                        memmap_folder=memap_folder,
                        save_for_spikedetekt=False,
                        channels_to_save=None,
                        return_sliced_data=False)
pl.save(os.path.join(memap_folder, 'B14R9_raw.npy'), data)

data_ecog = data[:64, :]
data_probe = data[64:, :]

data_probe_hp = pl.memmap(os.path.join(memap_folder, 'data_probe_hp.dat'),
                          dtype='int16',
                          mode='w+',
                          shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i, :] = filters.high_pass_filter(data_probe[i, :],
                                                   Fsampling=f_sampling,
                                                   Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)
Example #38
>>> data = zeros((3,3))
>>> #Write data:
>>> savetxt("myfile.txt", data)
>>> #Read:
>>> data = genfromtxt("myfile.txt") }}}

== Matplotlib (pylab) ==

Matplotlib provides an easy solution which seems to load data faster than read_array:

{{{#!python numbers=disable
>>> from numpy import *
>>> from pylab import load           # warning, the load() function of numpy will be shadowed
>>> from pylab import save
>>> data = zeros((3,3))
>>> save('myfile.txt', data)
>>> read_data = load("myfile.txt")
}}}

== numPy ==

{{{#!python numbers=disable
>>> savetxt('myfile.txt', data, fmt="%12.6G")    # save to file
}}}
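
For context, that pylab load/save pair has long been removed from matplotlib; a minimal modern equivalent using numpy alone (my addition, not part of the wiki page):

import numpy as np

data = np.zeros((3, 3))
np.savetxt('myfile.txt', data, fmt='%12.6G')  # ASCII table, like the old pylab.save
read_data = np.loadtxt('myfile.txt')          # ASCII read, like the old pylab.load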
Example #39
#!/usr/bin/env python
import scipy
import pylab
from motor_comm import Motor_Comm

comm = Motor_Comm()
dt = comm.dt()
num_motor = comm.num_motor()

T = 4.0
n = T / dt

t = scipy.arange(0, 4 * n) * dt
x = scipy.zeros((t.shape[0], num_motor))
x[:, 0] = 400 * scipy.cos(2.0 * scipy.pi * t / T) + 400 * scipy.cos(
    4.0 * scipy.pi * t / T)
x[:, 1] = 500 * scipy.cos(2.0 * scipy.pi * t / T) + 400 * scipy.cos(
    4.0 * scipy.pi * t / T)
pylab.save('test_kine.txt', x)
Example #40
    mec = meccs.meccs()
    sp = shape_procs.shape_procs()

    data = sp.read_cs_data(base_dir + "/" + estuaryid + "_" + str(syr) + "_" +
                           str(sday) + "_to_" + str(eyr) + "_" + str(eday) +
                           ".sp")

    su_u = zeros(data["cs_surf_salt"].shape, float)
    ds_s = zeros(data["cs_surf_salt"].shape, float)
    legendstr = []

    for i in range(len(s_dat)):
        dat = mec.get_Hansen_Rattray(data["cs_surf_salt"][i,:], data["cs_bott_salt"][i,:], data["davg_salt"][i,:], \
                                 data["cs_surf_hvel_dir"][i,:], data["davg_hvel_mag"][i,:], data["dt"], 24.8 * 3600)

        save(out_base + data["names"][i][0] + ".hr", dat, fmt="%20.10f")

        legendstr.append(data["names"][i][0])
        su_u[i, :] = dat[0, :]
        ds_s[i, :] = dat[1, :]

    mec.hnr_plot(su_u, ds_s, "test HNR plot", legendstr)

if ctype == "river_flux":
    start_time = ydhms2corie(int(syr), int(sday), 0, 15, 0)
    end_time = ydhms2corie(int(eyr), int(eday), 0, 0, 0)

    mr = mreader(base_dir, array([]), start_time, end_time)
    data = mr.get_river_flux(base_dir, start_time, end_time, 11, 900, [0, 1],
                             1)
Example #41



results = pl.array(results)
times = pl.array(times)
kms = ((results-6563.0)/6563.0)*2.99792458e5

pl.subplot(211)
pl.plot(times-int(times[0]),fwhm,'.')
pl.xlabel('Time (HJD)')
pl.ylabel('FWHM (Angstrom)')


pl.subplot(212)
pl.plot(times-int(times[0]),kms,'.')
pl.xlabel('Time (HJD)')
pl.ylabel('Line Velocity (km/s)')
pl.ylim(-250,250)
pl.show()


temp = pl.zeros((len(times),2),dtype='float')
for i in range(len(times)):
    temp[i,0] = times[i]
    temp[i,1] = kms[i]

pl.save('HaVel.dat',temp)

Example #42
data_ecog_clean = pl.load(os.path.join(memap_folder,'data_ecog_clean.npy'), mmap_mode='r+')
data_ecog_mua_shaftC = pl.load(os.path.join(memap_folder,'data_ecog_mua_shaftC.npy'), mmap_mode='r+')

data_ecog_fft_norm_shaftA = pl.load(os.path.join(memap_folder, 'data_ecog_fft_norm_shaftA.npy'), mmap_mode=None)
data_ecog_fft_norm_shaftC = pl.load(os.path.join(memap_folder, 'data_ecog_fft_norm_shaftC.npy'), mmap_mode=None)

phases_all_shaftA = pl.load(os.path.join(memap_folder, 'phases_all_shaftA.npy'), mmap_mode=None)
phases_all_shaftC = pl.load(os.path.join(memap_folder, 'phases_all_shaftC.npy'), mmap_mode=None)

data = pl.load(os.path.join(memap_folder,'B14R9_raw.npy'), mmap_mode='r+')


#----------Data generation-----------------
data = lio.read_all_csc(folder,  assume_same_fs=False, memmap=True, memmap_folder=memap_folder, save_for_spikedetekt=False, channels_to_save=None, return_sliced_data=False)
pl.save(os.path.join(memap_folder, 'B14R9_raw.npy'), data)

data_ecog = data[:64,:]
data_probe = data[64:,:]


data_probe_hp = pl.memmap(os.path.join(memap_folder,'data_probe_hp.dat'), dtype='int16', mode='w+', shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i,:] = filters.high_pass_filter(data_probe[i,:], Fsampling=f_sampling, Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)


shape_data_ss = (pl.shape(data_ecog)[0], pl.shape(data_ecog)[1]/int(f_sampling/f_subsample))
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'), dtype='int16', mode='w+', shape=shape_data_ss)
Example #43
        # Save filenames of matching views
        file = open(query_files[1] % (i, "match"), "w")
        for (index, score) in top_n:
            file.write("%s %f\n" %
                       (reference_files[0] % reference_files[2][index], score))
    return M


def plot_confusion(M):
    # Show full score matrix
    pylab.pcolor(numpy.array(M))
    pylab.colorbar()
    pylab.show()


# Tuples describing file names (image format, output format, range)
run1_files = ("/u/kosecka/research/run1/%06uL.png", "run1_%06uL.%s",
              range(2400, 3401))
run2_files = ("/u/kosecka/research/run2/%06uL.png", "run2_%06uL.%s",
              range(2200, 3201))

# run 1 in vocabulary tree, run 2 used as queries
M = match_runs(run1_files, run2_files)
pylab.save('run1vt_run2q.mat', M)
#plot_confusion(M)

# run 2 in vocabulary tree, run 1 used as queries
M = match_runs(run2_files, run1_files)
pylab.save('run2vt_run1q.mat', M)
#plot_confusion(M)
Example #44
        "%02d" % (init_time.day),
        "%02d" % (init_time.hour),
        "%02d" % (init_time.minute)
    ])

    if not os.path.exists('dbase/%s' % directory):
        print 'directory %s does not exist, creating directory and downloading files' % directory
        os.makedirs('dbase/%s' % directory)

        # VSO download
        client = vso.VSOClient()
        qr = client.query(vso.attrs.Time(itime, etime),
                          vso.attrs.Instrument('hmi'),
                          vso.attrs.Physobs('intensity'))
        nrec = qr.num_records()
        save("recnums/%s.txt" % directory, nrec)
        print nrec
        res = client.get(qr, path='./tmp/{file}.fits').wait()
        listOfFiles = os.listdir('./tmp')
        for f in listOfFiles:
            os.system("mv ./tmp/%s ./dbase/%s" % (f, directory))
        save("recnums/%s.txt" % directory, nrec)
    else:
        print "Directory %s exists" % directory
    #load number of records
    snrec = load("recnums/%s.txt.npy" % directory)
    nfiles = len([name for name in os.listdir("./dbase/%s" % directory)])

    if not nfiles == snrec:
        print 'Number of files incorrect: downloading files to %s nrec = %d...nfiles = %d' % (
            directory, snrec, nfiles)
Example #45
        
for i in range(len(date)):
    lt = x < 4687
    gt = x > 4686
    flux.append((pf.getdata(ff[i])[lt*gt]).sum())
    


flux = pl.array(flux)

temp = []
temp.append(date)
temp.append(phase)
temp.append(flux)

pl.save('HeIIlightcurve.dat',pl.array(temp).transpose())

lt = phase < 7.9

date = date[lt]
flux = pl.array(flux)[lt]
phase = pl.array(phase)[lt]
# flatten lightcurve using fourier method
fft = sci.fft(flux)
fft[0:4] = 0.0
fft[-4:] = 0.0
flux = sci.ifft(fft)

f,a = ast.signal.dft(date,flux,0,4000,1)
pl.figure()
pl.subplot(211)
Example #46
ew = pl.array(ew[:-2])
center = pl.array(center[:-2])

print len(ew),len(center),len(HJD)

#X = pl.load('rv.dat')
#ew = X[:,0]
pl.figure()
pl.subplot(311)
pl.title('Line Center')
pl.plot(center,'o')

pl.subplot(312)
pl.plot(HJD,ew,'go')
pl.title('Equivalent Width')

pl.subplot(313)

f,a = ast.signal.dft(HJD,ew,0,4000,1)
pl.plot(f,a)
pl.title('Equivalent Width FT')

pl.show()


temp = []
temp.append(HJD)
temp.append(ew)
temp.append(center)
pl.save('EW.dat',pl.array(temp).transpose())
Example #47
    fig.imshow(clrs, interpolation='nearest')
    pl.xlim(0, pl.xlim()[1])
    pl.ylim(pl.ylim()[0], 0)

    coords = (coords[(coords[:, 0] > 5) * \
                     (coords[:, 0] < stack.shape[0] - 5) *\
                     (coords[:, 1] > 5) * \
                     (coords[:, 1] < stack.shape[1] - 5)])

    print("Found %d good windows away from the edges" % len(coords))

    for c in coords:
        pl.plot(c[1], c[0], 'wo', alpha=0.3)
    pl.title("gaussian std %d, threshold %.1f, labels %d" %
             (GS, thr, np.sum(goodwindows)))
    pl.savefig(args[0].replace(".npy", "_selectedwindows.pdf"))
    if options.showme: pl.show()
    pl.close()
    # coords = np.array([coords[:, 1], coords[:, 0]])
    pl.save(args[0].replace(".npy", "_coords.npy"), coords[:, 1::-1])
    pl.save(args[0].replace(".npy", "_mask.npy"), mask)
    # print (mask)

    # for i in range(labels.max()+1):
    #    if i in labels:
    #        pl.figure()
    #        pl.imshow(mask[:i].sum(axis=0),  interpolation='nearest',
    #               cmap='gist_gray')
    #        pl.savefig("mask.%04d.png"%i)
    #        pl.close('all')
Example #48
def showVectorDisplacements():

    global testImage, croppedRefImage, u, v, valid, q1, umean, vmean, x, y, sxyVar, wxyVar, goodvectorsVar
    from scipy import where, compress, logical_and, median, logical_or, nan
    from pylab import resize, transpose, quiver, title, show, find, imshow, hist, figure, clf, draw, save, load, xlabel, ylabel, flipud

    mxy = 3
    wxy = int(wxyVar.get())
    sxy = int(sxyVar.get())
    goodvectors = float(goodvectorsVar.get())
    #process to find PIV-style displacements
    x, y, u, v, q1, valid = simplepiv(croppedRefImage, testImage, wxy, mxy,
                                      sxy)
    good = where(logical_and(q1 > goodvectors, valid > 0), True, False)
    umean = median(compress(good.flat, u.flat))
    vmean = median(compress(good.flat, v.flat))
    u = where(logical_or(q1 < goodvectors, valid < 0), 0, u)
    v = where(logical_or(q1 < goodvectors, valid < 0), 0, v)
    u = u - umean
    v = v - vmean
    save('vecx.out', x)
    save('vecy.out', y)
    save('vecu.out', u)
    save('vecv.out', v)
    save('vecq1.out', q1)
    save('vecvalid.out', valid)
    u = flipud(u)
    v = -flipud(v)
    quiver(x, y, u, v)
    title('Vector displacements')
    xlabel('Pixels')
    ylabel('Pixels')
    show()
    return
Example #49
mkdir_p(os.path.join(outpath, 'pdb'))
mkdir_p(os.path.join(outpath, 'aln'))
mkdir_p(os.path.join(outpath, 'adist'))

#erase files
with open(os.path.join(outpath, 'insertCounts'), 'wt') as f:
    pass
with open(os.path.join(outpath, 'deletecounts'), 'wt') as f:
    pass
with open(os.path.join(outpath, 'unresolvedcounts'), 'wt') as f:
    pass

for name in fullalnseqs.keys():
    if not os.path.exists(os.path.join(distpath, name + '.npy')):
        print(name, "Skipping: No coords")
        continue
    print(name)

    adist = plotContacts(name)
    pylab.subplots_adjust(bottom=0.05, right=0.95, top=0.95, left=0.05)

    pylab.figure(1)
    pylab.savefig(os.path.join(outpath, 'pdb/{}.png'.format(name)))
    pylab.close()
    pylab.figure(2)
    pylab.savefig(os.path.join(outpath, 'aln/{}.png'.format(name)))
    pylab.close()

    pylab.save(os.path.join(outpath, 'adist/{}'.format(name)), adist)
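
# mkdir_p above is presumably the classic makedirs wrapper; on Python 3
# the standard library covers it directly (sketch):
import os
os.makedirs(os.path.join(outpath, 'adist'), exist_ok=True)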
Example #50
lines['Hb'] = Hb
lines['HeII'] = HeII
lines['Cont'] = continuum
time = time[:-2]
# save the lightcurves to file
for line in ['Ha','Hb','HeII','Cont']:
#for line in ['HeII']:
    lc = pl.array(lines[line])[:-2]
    #lcmean = lc.mean()
    #lc = lc - lcmean
    ##temp = pl.zeros((len(lc),3),dtype='float')
    
    ## flatten lightcurves by fitting a 13th-order poly to them
    #tmean = time.mean()

    #p = pl.polyfit(time-tmean,lc,13)
    #yy = pl.polyval(p,time-tmean)
    #print yy
    temp = []
    temp.append(time)
    temp.append(lc)
    #temp.append(lc-yy)
    
    #for i in range(len(lc)):
        #temp[i,0] = time[i]
        #temp[i,1] = lc[i]
        #temp

    pl.save('speclc_%s.dat'%line,pl.array(temp).transpose(),fmt='%1.6f')
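
# A working version of the commented-out polynomial flattening above
# (a sketch with numpy only; degree 3 is just for illustration):
import numpy as np
t = np.linspace(0.0, 1.0, 50)
lc_demo = np.sin(40 * t) + 3 * t                 # fast signal + slow trend
p = np.polyfit(t - t.mean(), lc_demo, 3)
flat = lc_demo - np.polyval(p, t - t.mean())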

Example #51

import sciris as sc
import pylab as pl


class Animal(sc.prettyobj):
    def __init__(self, name, mass, velocity):
        self.name = name
        self.mass = mass
        self.velocity = velocity

    def energy(self):
        return 0.5 * self.mass * self.velocity**2


# Create animals
worm = Animal(name='worm', mass=0.002,
              velocity=0.01)  # shorthand: Animal('worm', 0.002, 0.01)
pigeon = Animal(name='pigeon', mass=0.086, velocity=9.6)
cat = Animal(name='cat', mass=7.3, velocity=4.4)
animals = [worm, pigeon, cat]

# Save and load animals
pl.save('animals.npy', animals)  # file must end .npy (for numpy)
saved_animals = pl.load('animals.npy', allow_pickle=True)  # object arrays need allow_pickle on numpy >= 1.16.3

# Check it worked
print(saved_animals[0])
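
# The same round trip without the pylab aliases, making the pickling
# explicit (a sketch; numpy >= 1.16.3 assumed):
import numpy as np
np.save('animals.npy', np.array(animals, dtype=object))
loaded = np.load('animals.npy', allow_pickle=True)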
Example #52
def generate_data(a_b, xl, kind):  # 'kind' renamed from 'type' to avoid shadowing the builtin
    y=[]
    execution_time=[]

    particle = Spheroid(m, a_b, kind)
    particle.set_xl(xl)



    id = str(a_b) + " " + str(xl) + " " + str(kind)

    for i in range(n_min,n_max,2):
        nmax = i
        print(nmax)
        start = time.time()
        svm = SpheroidalSVM(particle,nmax)
        b_sca = svm.getSolution(TMInputWave(alpha))[0]
        C_ext = getCext(particle, alpha, b_sca, nmax)[0]
        C_sca = getCsca(particle, b_sca, nmax)[0]
        delta=(C_ext-C_sca)/(C_ext+C_sca)
        execution_time.append(time.time() - start)
        y.append(delta)

    y = np.fabs(y)
    save("svm_delta" + id, y)
    save("svm_time" + id, execution_time)
    y = []
    execution_time = []

    for i in range(n_min,n_max,2):
        nmax = i
        print(nmax)
        start = time.time()
        ebcm = SpheroidalEBCM(particle,nmax)
        b_sca = ebcm.getMatrixSolution(TMInputWave(alpha))[0]
        C_ext = getCext(particle, alpha, b_sca, nmax)[0]
        C_sca = getCsca(particle, b_sca, nmax)[0]
        delta=(C_ext-C_sca)/(C_ext+C_sca)
        execution_time.append(time.time() - start)
        y.append(delta)

    y = np.fabs(y)
    save("ebcm_m_delta" + id, y)
    save("ebcm_m_time" + id, execution_time)
    y = []
    execution_time = []

    for i in range(n_min,n_max,2):
        nmax = i
        print(nmax)
        start = time.time()
        ebcm = SpheroidalEBCM(particle,nmax)
        b_sca = ebcm.getSolution(TMInputWave(alpha))[0]
        C_ext = getCext(particle, alpha, b_sca, nmax)[0]
        C_sca = getCsca(particle, b_sca, nmax)[0]
        delta=(C_ext-C_sca)/(C_ext+C_sca)
        execution_time.append(time.time() - start)
        y.append(delta)

    y = np.fabs(y)
    save("ebcm_delta" + id, y)
    save("ebcm_time" + id, execution_time)
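
# The three loops above repeat one timing pattern; a small helper can
# factor it out (sketch, names are ours):
import time

def timed(fn, *args):
    start = time.perf_counter()
    result = fn(*args)
    return result, time.perf_counter() - start

value, elapsed = timed(sum, range(1_000_000))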
Example #53
spectre = exp(-(e / 200)**2 - (t / lgt / .6)**2) * 30
pics = [(52., 60., 2.5, 10000.), (52., 140., 1., 10000.), (52., 150., 1., .5),
        (125., 160., 1., .3), (125., 170., 1., .2), (125., 180., 1., .1),
        (222., 190., 1., .07), (422., 200., 1., .05), (52., 340., 1., -10000.),
        (8., 350., 1., -5.), (2., 360., 1., -1.), (1., 370., 1., -.2),
        (.1, 380., 1., -.1), (222., 390., 1., .07), (22., 400., 1., -.8)]
for pds, moy, sig, tau in pics:
    print(pds, moy, sig, tau)
    spectre += pds * exp(-.5 * (e - moy)**2 / sig**2) * exp(-t / tau / lgt)
pics = [(52., 260., 1., .2), (52., 275., 1., .08), (1., 290., 1., .1)]
for pds, moy, sig, tau in pics:
    print(pds, moy, sig, tau)
    spectre += pds * exp(-.5 *
                         (e - moy)**2 / sig**2) * (1.1 + sin(-t / tau / lgt))

spectre = poisson(spectre)
subplot(121)
semilogy(te, spectre.sum(0))
subplot(122)
semilogy(tt, spectre.sum(1))

tb = c_[tt, spectre].T
# df=pd.DataFrame(c_[tt,spectre])

# df.to_csv("jouet_s2.csv",sep="\t",header=False,index=False)
# savetxt("jouet_s2.csv",c_[tt,spectre],fmt='%.6e'+' \t%i'*lge, delimiter='\t ')

pl.save('jouet_s2_dict.bin', {"ta": 1., "spectre": tb})  # np.save appends .npy and pickles the dict

show()
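
# Hedged alternative: np.savez keeps the named arrays as arrays instead
# of pickling the whole dict through np.save (sketch, demo data):
import numpy as np
np.savez('jouet_s2.npz', ta=1.0, spectre=np.arange(12.).reshape(3, 4))
with np.load('jouet_s2.npz') as data:
    print(data['ta'], data['spectre'].shape)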
Example #54
    def save(self, filename="/dev/stdout"):
        """Dumps detector object to file"""
        pylab.save(filename, self.data, fmt='%g')
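
# Note: fmt= belongs to the old matplotlib text writer; the modern text
# equivalent is np.savetxt (sketch, Unix-only because of /dev/stdout):
import numpy as np
np.savetxt('/dev/stdout', np.arange(3.0), fmt='%g')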
Example #55
    def __init__(
        self,
        features,
        nfeatures,
        nside=16,
        include_haversine=False,
        galactic_mask=None,
        affinity="euclidean",
        scaler=preprocessing.StandardScaler(),
        file_affinity="",
        verbose=False,
        save_affinity=False,
        feature_weights=None,
    ):
        """
        -features: list of features to cluster
        -nfeatures

        """
        self._nside = nside
        self._nfeat = nfeatures
        if galactic_mask is None:
            self.galactic_mask = np.bool_(pl.ones_like(features[0]))
        else:
            self.galactic_mask = galactic_mask
        features[0] = features[0][self.galactic_mask]
        features[1] = features[1][self.galactic_mask]

        if self._nfeat > 1:
            assert features[0].shape[0] == features[1].shape[0]
            self._npix = features[0].shape[0]  # hp.nside2npix(nside)
        else:
            self._npix = features.shape[0]
        if feature_weights is None:
            feature_weights = pl.ones(self._nfeat)
        self.verbose = verbose
        self._X = pl.zeros((self._npix, self._nfeat))

        if self._nfeat == 1:
            features = [features]
        for i, x in zip(range(self._nfeat), features):
            self._X[:, i] = x
        # Standard rescaling of all the features
        if scaler is not None:
            self._X = scaler.fit_transform(self._X)

        for i in range(self._nfeat):
            self._X[:, i] *= feature_weights[i]

        self.estimate_affinity(affinity, file_affinity)

        self._has_angles = False
        if include_haversine:
            self._has_angles = True
            self.estimate_haversine()

        self._Affinity = pairwise_kernels(self._distance_matr,
                                          metric="precomputed")

        if save_affinity:
            pl.save(file_affinity, self._Affinity)
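
# The scale-then-weight step in isolation (a sketch; data and weights
# are made up):
import numpy as np
from sklearn import preprocessing
X = np.random.default_rng(0).normal(size=(100, 2))
X = preprocessing.StandardScaler().fit_transform(X)
X *= np.array([1.0, 0.5])   # per-feature weights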
Example #56



results = pl.array(results)
times = pl.array(times)
kms = ((results - 4686.0) / 4686.0) * 2.99792458e5  # Doppler velocity of the He II 4686 A line in km/s

pl.subplot(211)
pl.plot(times-int(times[0]),fwhm,'.')
pl.xlabel('Time (HJD)')
pl.ylabel('FWHM (Angstrom)')


pl.subplot(212)
pl.plot(times-int(times[0]),kms,'.')
pl.xlabel('Time (HJD)')
pl.ylabel('Line Velocity (km/s)')
pl.ylim(-400,400)
pl.show()


temp = pl.zeros((len(times),2),dtype='float')
for i in range(len(times)):
    temp[i,0] = times[i]
    temp[i,1] = kms[i]

pl.save('HeIIVel.dat',temp)
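
# The fill loop above is equivalent to a single stacking call (sketch):
temp_alt = pl.column_stack([times, kms])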


Example #57
def save_it_all():
    from pylab import save
    save('wiki_letters_2G', (train_string, test_string))  # np.save takes the filename first and pickles the tuple