Example #1
File: Utilities2.py Project: qAp/LisaMapp
 def __init__(self, xlmrpath, xlmipath, maptype):
     xlmr = pylab.load(xlmrpath)
     xlmi = pylab.load(xlmipath)
     xlmrname = os.path.basename(xlmrpath)
     self.outfileprefix = '-'.join(re.split('-', xlmrname)[:-1])
     try:
         assert numpy.shape(xlmr) == numpy.shape(xlmi)
         self.xlmr, self.xlmi = xlmr, xlmi
     except AssertionError:
         print 'The two arrays loaded must have the same dimensions.'
     self.xlm = self.xlmr + 1j * self.xlmi
     print 'Multiple moments files read successfully!'
     ntrunc = int(numpy.sqrt(numpy.shape(self.xlm)[0]) - 1)
     self.ntrunc = ntrunc
     self.gotpix = False
     self.maptype = maptype
     #        print 'Applying quick-fix I to multiple moments...'
     #        indxpn = LISAresponse.getMLvec( self.ntrunc , 'pn' )
     #        xlm = numpy.zeros( numpy.shape(self.xlm) , dtype=complex )
     #        for i,ml in enumerate(indxpn):
     #            m , l = ml[0] , ml[1]
     #            k = indxpn.index( ( -m,l ) )
     #            xlm[i] = (-1)**m*self.xlm[k]
     #        self.xlm = xlm
     return
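Note: these examples date from when pylab exported matplotlib's old text loader; pylab.load and pylab.save were later removed because they shadowed numpy.load/numpy.save. A minimal modern sketch of the load above, assuming the moment files are whitespace-delimited text tables:

import numpy as np

xlmr = np.loadtxt(xlmrpath)   # drop-in for the removed pylab.load
xlmi = np.loadtxt(xlmipath)
xlm = xlmr + 1j * xlmi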
Example #2
def doit():
    map = Basemap(projection='lcc',
		    llcrnrlon=80,
		    urcrnrlon=160,
		    llcrnrlat=-50,
		    urcrnrlat=-8,
		    #lat_ts=-35,
		    lat_0=-35,
		    lon_0=120,
		    resolution='c',
		    area_thresh=1000.)
    p.clf()
    map.drawcoastlines()
    # map.drawcountries()
    
    # map.drawrivers()

    map.drawmeridians(p.arange(0,360,10),labels=[0,0,1,0])
    map.drawparallels(p.arange(-90,0,10),labels=[1,0,0,0])

    traj=p.load('example_traj.dat')
    coast=p.load('/media/sda4/map-data/aust-coast-noaa-2000000-1.dat')

    traj_x,traj_y   = map(traj[:,1],traj[:,0])
    coast_x,coast_y = map(coast[:,0],coast[:,1])  # needed below; was commented out

    p.plot(traj_x,traj_y)
    p.plot(coast_x,coast_y,color='black')

    map.drawmapboundary()
    p.show()
    return map 
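Basemap, used above, has since been retired; a rough cartopy sketch of the same Lambert conformal map (same extent and projection parameters, for comparison only):

import matplotlib.pyplot as plt
import cartopy.crs as ccrs

ax = plt.axes(projection=ccrs.LambertConformal(central_longitude=120,
                                               central_latitude=-35))
ax.set_extent([80, 160, -50, -8], crs=ccrs.PlateCarree())
ax.coastlines(resolution='110m')   # coarse coastline, like resolution='c'
ax.gridlines(draw_labels=True)
plt.show()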
Example #3
def test_bnet_mle():
    """EXAMPLE: MLE learning on a BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))
    """Instantiate the model"""
    net = models.bnet(dag, ns, [])
    """Learn the parameters"""
    samples = np.array(pylab.load('./Data/lawn_samples.txt')) - 1
    net.learn_params_mle(samples.copy())
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/bnet_mle_exact_max_sum_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Example #4
File: bosGUI.py Project: zwghit/CFCFD-NG
def showContours_u():

    global q1, valid, umean, vmean, upperLevelVar, lowerLevelVar, numcontourVar, goodvectorsVar, modelgreylevelVar, lowerLevel, upperLevel, numcontour, tick
    from scipy import zeros, where, logical_or, argmin, shape, ravel, nan, compress, r_, flipud
    from pylab import imshow, clf, title, save, load, figure, contourf, cm, hold, contour, xlabel, ylabel

    x = load('vecx.out.npy')
    y = load('vecy.out.npy')
    u = load('vecu.out.npy')
    q1 = load('vecq1.out.npy')
    valid = load('vecvalid.out.npy')
    modelgreylevel = float(modelgreylevelVar.get())
    goodvectors = float(goodvectorsVar.get())
    u = where(logical_or(q1 < goodvectors, valid < 0), modelgreylevel, u)
    u = flipud(u)
    lowerLevel = float(lowerLevelVar.get())
    upperLevel = float(upperLevelVar.get())
    numcontour = int(numcontourVar.get())
    tick = float(upperLevel - lowerLevel) / numcontour
    uu = r_[lowerLevel:upperLevel:tick]
    figure()
    contourf(x, y, u, uu, cmap=cm.gray)
    xlabel('Pixels')
    ylabel('Pixels')
    return
Example #5
	def load_csv(self,f):
		"""
		Loading data from a csv file. Uses pylab's load function. Seems much faster
		than scipy.io.read_array.
		"""
		varnm = f.readline().split(',')

		# what is the date variable's key if any, based on index passed as argument
		if self.date_key != '':
			try:
				rawdata = pylab.load(f, delimiter=',',converters={self.date_key:pylab.datestr2num})			# don't need to 'skiprow' here
			except ValueError:																				# if loading via pylab doesn't work use csv
				rawdata = self.load_csv_nf(f)	

				# converting the dates column to a date-number
				rawdata[self.date_key] = pylab.datestr2num(rawdata[self.date_key])

			self.date_key = varnm[self.date_key]
		else:
			try:
				rawdata = pylab.load(f, delimiter=',')														# don't need to 'skiprow' here
			except ValueError:																				# if loading via pylab doesn't work use csv
				rawdata = self.load_csv_nf(f)	

		# making sure that the variable names contain no leading or trailing spaces
		varnm = [i.strip() for i in varnm]

		# transforming the data into a dictionary
		if type(rawdata) == list:
			# if the csv module was used
			self.data = dict(zip(varnm,rawdata))
		else:
			# if the pylab.load module was used
			self.data = dict(zip(varnm,rawdata.T))
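For reference, a minimal modern sketch of the same CSV-to-dict load, with np.genfromtxt standing in for the removed pylab.load (the path argument here is hypothetical; the original works on an open file handle):

import numpy as np

def load_csv_dict(path):
    # names=True consumes the header row, like the readline() above
    arr = np.genfromtxt(path, delimiter=',', names=True)
    return {name: arr[name] for name in arr.dtype.names}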
Example #6
File: Utilities2.py Project: qAp/LisaMapp
    def __init__(self,xlmrpath,xlmipath,maptype):
        xlmr = pylab.load(xlmrpath)
        xlmi = pylab.load(xlmipath)
        xlmrname = os.path.basename(xlmrpath)
        self.outfileprefix = '-'.join( re.split( '-' , xlmrname )[:-1] )
        try:
            assert numpy.shape(xlmr) == numpy.shape(xlmi)
            self.xlmr , self.xlmi = xlmr , xlmi
        except AssertionError:
            print 'The two arrays loaded must have the same dimensions.'
        self.xlm = self.xlmr + 1j*self.xlmi
        print 'Multiple moments files read successfully!'
        ntrunc = int( numpy.sqrt( numpy.shape(self.xlm)[0] ) - 1 )
        self.ntrunc = ntrunc
        self.gotpix = False
        self.maptype = maptype
#        print 'Applying quick-fix I to multiple moments...'
#        indxpn = LISAresponse.getMLvec( self.ntrunc , 'pn' )
#        xlm = numpy.zeros( numpy.shape(self.xlm) , dtype=complex )
#        for i,ml in enumerate(indxpn):
#            m , l = ml[0] , ml[1]
#            k = indxpn.index( ( -m,l ) )
#            xlm[i] = (-1)**m*self.xlm[k]
#        self.xlm = xlm    
        return
Example #7
File: hplot.py Project: citterio/physplit
def InvokeMap(coastfile='/media/sda4/map-data/aust-coast-noaa-2000000-1.dat',
		    lllon=80,
		    urlon=166,
		    lllat=-47,
		    urlat=-9,
		    draw_map=True):
    global PYLIB_PATH

    map = Basemap(projection='cyl',
			llcrnrlon=lllon,
			urcrnrlon=urlon,
			llcrnrlat=lllat,
			urcrnrlat=urlat,
			#lat_ts=-35,
			lat_0=-35,
			lon_0=120,
			resolution='l',
			area_thresh=1000.)


    try: 
	coast = p.load(coastfile)
	coast_x,coast_y = map(coast[:,0],coast[:,1])
	p.plot(coast_x,coast_y,color='black')    
    except IOError:
	map.drawcoastlines()

    map.drawmapboundary()
    map.drawmeridians(p.arange(0,360,10),labels=[0,0,1,0])
    map.drawparallels(p.arange(-90,0,10),labels=[1,0,0,0])

    return map
Example #8
File: covfit.py Project: fspaolo/code
def plot_covs(filein, fileout):
    import pylab as P
    data1 = P.load(filein)
    data2 = P.load(fileout)
    P.plot(data1[:,0], data1[:,1], 'o')
    P.plot(data2[:,0], data2[:,1])
    P.grid(True)
    P.show()
Example #9
def plot_covs(filein, fileout):
    import pylab as P
    data1 = P.load(filein)
    data2 = P.load(fileout)
    P.plot(data1[:, 0], data1[:, 1], 'o')
    P.plot(data2[:, 0], data2[:, 1])
    P.grid(True)
    P.show()
Example #10
def compute_wishart_A(p):
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0)*g)
    ew = [0.0015,0.0004,0.0004]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    A1 = assemble_wishart_matrix(B,ev1,ew,p)
    A2 = assemble_wishart_matrix(B,ev2,ew,p)
    return A1,A2
Example #11
def compute_wishart_A(p):
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0) * g)
    ew = [0.0015, 0.0004, 0.0004]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    A1 = assemble_wishart_matrix(B, ev1, ew, p)
    A2 = assemble_wishart_matrix(B, ev2, ew, p)
    return A1, A2
Example #12
File: info.py Project: JohanComparat/pyLPT
def getParamCovMat(prefix,dlogpower = 2, theoconstmult = 1.,dlogfilenames = ['dlogpnldloga.dat'],volume=256.**3,startki = 0, endki = 0, veff = [0.]):
    """
    Calculates parameter covariance matrix from the power spectrum covariance matrix and derivative term
    in the prefix directory
    """
    nparams = len(dlogfilenames)

    kpnl = M.load(prefix+'pnl.dat')
    k = kpnl[startki:,0]

    nk = len(k)
    if (endki == 0):
        endki = nk
        
    pnl = M.array(kpnl[startki:,1],M.Float64)
    covarwhole = M.load(prefix+'covar.dat')
    covar = covarwhole[startki:,startki:]
    if len(veff) > 1:
        sqrt_veff = M.sqrt(veff[startki:])
    else:
        sqrt_veff = M.sqrt(volume*M.ones(nk))

    dlogs = M.reshape(M.ones(nparams*nk,M.Float64),(nparams,nk))
    paramFishMat = M.reshape(M.zeros(nparams*nparams*(endki-startki),M.Float64),(nparams,nparams,endki-startki))
    paramCovMat = paramFishMat * 0.

    # Covariance matrices of dlog's
    for param in range(nparams):
        if len(dlogfilenames[param]) > 0:
            dlogs[param,:] = M.load(prefix+dlogfilenames[param])[startki:,1]

    normcovar = M.zeros(M.shape(covar),M.Float64)
    for i in range(nk):
        normcovar[i,:] = covar[i,:]/(pnl*pnl[i])

    M.save(prefix+'normcovar.dat',normcovar)

    f = k[1]/k[0]

    if (volume == -1.):
        volume = (M.pi/k[0])**3

    #theoconst = volume * k[1]**3 * f**(-1.5)/(12.*M.pi**2) #1 not 0 since we're starting at 1
    for ki in range(1,endki-startki):
        for p1 in range(nparams):
            for p2 in range(nparams):
                paramFishMat[p1,p2,ki] = M.sum(M.sum(\
                M.inverse(normcovar[:ki+1,:ki+1]) *
                M.outerproduct(dlogs[p1,:ki+1]*sqrt_veff[:ki+1],\
                               dlogs[p2,:ki+1]*sqrt_veff[:ki+1])))
                
                
        paramCovMat[:,:,ki] = M.inverse(paramFishMat[:,:,ki])

    return k[1:],paramCovMat[:,:,1:]
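This routine leans on the old Numeric-era pylab namespace. A hedged mapping of those names onto modern numpy, in case the function needs porting:

# M.load / M.save       ->  numpy.loadtxt / numpy.savetxt
# M.Float64             ->  numpy.float64
# M.inverse(a)          ->  numpy.linalg.inv(a)
# M.outerproduct(a, b)  ->  numpy.outer(a, b)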
Example #13
def test_mrf_EM():
    """EXAMPLE: EM learning on a MRF"""
    """Define MRF graph structure"""
    C = 0
    S = 1
    R = 2
    W = 3
    nodes = 4
    adj_mat = sparse.lil_matrix((nodes, nodes), dtype=int)
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    adj_mat[R, S] = 1

    """Define clique domains and node sizes"""
    ns = 2 * np.ones((1, nodes))
    clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]

    """Define cliques and potentials"""
    clqs = []
    clqs.append(cliques.discrete_clique(0, clq_doms[0], np.array([2])))
    clqs.append(cliques.discrete_clique(1, clq_doms[1], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(2, clq_doms[2], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(3, clq_doms[3], np.array([2, 2, 2])))

    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs)
    
    """
    Load the samples, and set one sample of one node to be unobserved. This
    should not affect the learnt parameters much, and demonstrates that
    the algorithm can handle unobserved samples.
    """
    samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
    samples[0][0] = []

    """Learn the parameters"""
    net.learn_params_EM(samples[:])
   
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)

    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
   
    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/mrf_em_exact_max_sum_res.txt'))

    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Example #14
def degraderesolution(prefix,factor,dlogstring):
    covar = M.load(prefix+'covar.dat')
    pnl = M.load(prefix+'pnl.dat')
    dlog = M.load(prefix+dlogstring)[:,1]
    k = pnl[:,0]*1.
    p = pnl[:,1]*1.
    gausspart = M.load(prefix+'gausspart.dat')
    nbins = len(k)

    nongausspart = covar - gausspart

    nongausspartnew = nongausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    knew = k[:nbins-factor:factor]*0.
    pnew = p[:nbins-factor:factor]*0.
    gausspartnew = gausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    nbinsnew = len(knew)
    dlognew = dlog[:nbins-factor:factor]*0.

    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        print i1,i1+factor-1,nbins
        print i1new,nbinsnew
        weights = k[i1:i1+factor-1]**3
        sumweights = M.sum(weights)
        pnew[i1new] = M.sum(p[i1:i1+factor-1]*weights)/sumweights
        knew[i1new] = M.sum(k[i1:i1+factor-1]*weights)/sumweights
        dlognew[i1new] = M.sum(dlog[i1:i1+factor-1]*weights)/sumweights

    sqrtkfact = M.sqrt(k[1]/k[0])
        
    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        for i2 in range(0,nbins-factor,factor):
            i2new = i2/factor
                                                                       
            weights2 = M.outer(k[i1:i1+factor-1]**3,k[i2:i2+factor-1]**3)
            sumweights2 = M.sum(M.sum(weights2))
            nongausspartnew[i1new,i2new] = M.sum(M.sum(nongausspart[i1:i1+factor-1,i2:i2+factor-1]*weights2))/sumweights2

            if i1new == i2new:
                vk = (4.*M.pi/3.)*((k[i1+factor-1]*sqrtkfact)**3 - (k[i1]/sqrtkfact)**3)
                gausspartnew[i1new,i2new] = (2.*M.pi)**3 * 2.*(pnew[i1new]**2)/vk
                                                                       
    covarnew = gausspartnew + nongausspartnew

    prefixnew = prefix+'degrade'+str(factor)+'/'
    os.system('mkdir '+prefixnew)
    M.save(prefixnew+'pnl.dat',M.transpose([knew,pnew]), fmt = '%18.16e')
    M.save(prefixnew+'covar.dat',covarnew, fmt = '%18.16e')
    M.save(prefixnew+'gausspart.dat',gausspartnew, fmt = '%18.16e')
    M.save(prefixnew+dlogstring,M.transpose([knew,dlognew]), fmt = '%18.16e')
    M.save(prefix+'nbins.dat',M.array([nbinsnew],shape=(1,1,)), fmt = '%d')
Example #15
File: Utilities2.py Project: qAp/LisaMapp
 def __init__(self,fishrpath,fishipath,fishtype):
     fishr , fishi = pylab.load(fishrpath) , pylab.load(fishipath)
     try:
         assert numpy.shape(fishr) == numpy.shape(fishi)
         self.fish = fishr + 1j*fishi
     except AssertionError:
         print 'The two arrays loaded must have the same dimensions.'
         raise
     self.fishtype = fishtype
     self.ntrunc = int( numpy.sqrt( numpy.shape(self.fish)[0] ) - 1 )
     self.decomposed  = False
     self.regularised = False
     return
Example #16
File: Utilities2.py Project: qAp/LisaMapp
 def __init__(self, fishrpath, fishipath, fishtype):
     fishr, fishi = pylab.load(fishrpath), pylab.load(fishipath)
     try:
         assert numpy.shape(fishr) == numpy.shape(fishi)
         self.fish = fishr + 1j * fishi
     except AssertionError:
         print 'The two arrays loaded must have the same dimensions.'
         raise
     self.fishtype = fishtype
     self.ntrunc = int(numpy.sqrt(numpy.shape(self.fish)[0]) - 1)
     self.decomposed = False
     self.regularised = False
     return
Example #17
def GetSparseMatrix(psi, config):
    matrix = pylab.load("d130_50stk-matel")
    row = array(matrix[:, 0], dtype=int) - 1
    col = array(matrix[:, 1], dtype=int) - 1
    matelem = array(matrix[:, 2], dtype=complex)

    return row, col, matelem
Example #18
def LoadColormap(filename, reverse=False):
    data = pylab.load(filename)

    samples = len(data) / 4
    t = linspace(0, 1, samples)
    r = list(data[0::4])
    g = list(data[1::4])
    b = list(data[2::4])

    if reverse:
        r.reverse()
        g.reverse()
        b.reverse()

    red = []
    green = []
    blue = []

    for i in range(samples):

        red.append((t[i], r[i], r[i]))
        green.append((t[i], g[i], g[i]))
        blue.append((t[i], b[i], b[i]))

    cdict = {"red": red, "green": green, "blue": blue}
    cmap = matplotlib.colors.LinearSegmentedColormap("my_colors", cdict, 1024)

    return cmap
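In the cdict built above, each (t, y0, y1) triple anchors a channel at position t, with y0 the value approached from below and y1 the value used from above; equal y0 and y1, as here, give a smooth map. A usage sketch with a hypothetical palette file:

cmap = LoadColormap("palette.txt", reverse=True)   # hypothetical file
pylab.imshow(pylab.rand(32, 32), cmap=cmap)
pylab.colorbar()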
Example #19
def LoadColormapMirrored(filename):
	data = pylab.load(filename)

	samples = len(data)/2
	t = linspace(0,1,samples)
	r = list(data[0::4])
	g = list(data[1::4])
	b = list(data[2::4])

	r.reverse()
	g.reverse()
	b.reverse()

	# mirror each channel, pairing red with blue; copies keep the later
	# assignments from seeing the already-mirrored lists
	r0, g0, b0 = list(r), list(g), list(b)
	r = list(reversed(b0)) + r
	g = list(reversed(g0)) + g
	b = list(reversed(r0)) + b

	red = []
	green = []
	blue = []

	for i in range(samples):

		red.append((t[i], r[i], r[i]))
		green.append((t[i], g[i], g[i]))
		blue.append((t[i], b[i], b[i]))

	cdict = { "red": red, "green": green, "blue": blue }
	cmap = matplotlib.colors.LinearSegmentedColormap("my_colors", cdict, 1024)

	return cmap
Example #20
File: fftTools.py Project: msyriac/flipper
def readBinnedPower(file):
    """
    @brief reads in a binned power spectrum from a file.
    The file must have columns specified as: binLeft, binRight, l, cl
    """
    binLeft,binRight,l,cl = pylab.load(file,skiprows= 50,unpack=True,usecols=[0,1,2,3])
    return l,cl
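numpy.loadtxt accepts the same skiprows/unpack/usecols keywords, so a drop-in sketch for the removed pylab.load here is:

import numpy
binLeft, binRight, l, cl = numpy.loadtxt(file, skiprows=50, unpack=True,
                                         usecols=[0, 1, 2, 3])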
Example #21
File: example.py Project: AtomAleks/PyProp
def GetSparseMatrix(psi, config):
	matrix = pylab.load("d130_50stk-matel")
	row = array(matrix[:,0], dtype=int) - 1
	col = array(matrix[:,1], dtype=int) - 1
	matelem = array(matrix[:,2], dtype=complex)

	return row, col, matelem
Example #22
def makeplot(filename):
    T0 = 2452525.374416
    P = 0.154525
    
    X = pl.load(filename)
    x = X[:,0]
    y = X[:,1]
    print x[0] # check for HJD faults
    
    #orbital phase
    p = (x-T0)/P
    
    pl.figure(figsize=(6,4))
    pl.subplots_adjust(hspace=0.47,left=0.16)
    
    pl.subplot(211)
    pl.scatter(p,y,marker='o',s=0.1,color='k')
    pl.ylim(-0.06,0.06)
    pl.xlim(pl.average(p)-1.25,pl.average(p)+1.25)
    pl.ylabel('Intensity')
    pl.xlabel('Orbital Phase')
    
    pl.subplot(212)
    f,a = ast.signal.dft(x,y,0,4000,1)
    pl.plot(f,a,'k')
    pl.ylabel('Amplitude')
    pl.xlabel('Frequency (c/d)')
    #pl.ylim(yl[0],yl[1])
    
    #pl.vlines(3636,0.002,0.0025,color='k',linestyle='solid')
    #pl.vlines(829,0.002,0.0025,color='k',linestyle='solid')
    #pl.text(3500,0.00255,'DNO',fontsize=11)
    #pl.text(700,0.00255,'lpDNO',fontsize=11)
    pl.ylim(0.0,0.004)
    pl.savefig('%spng'%filename[:-3])
Example #23
def readBinnedPower(file):
    """
    @brief reads in a binned power spectrum from a file.
    The file must have columns specified as: binLeft, binRight, l, cl
    """
    binLeft,binRight,l,cl = pylab.load(file,skiprows= 50,unpack=True,usecols=[0,1,2,3])
    return l,cl
Example #24
    def estimate_affinity(self, affinity, file_affinity):

        if affinity == "euclidean":
            self._metric = DistanceMetric.get_metric("euclidean")
        elif affinity == "hellinger":
            self._metric = DistanceMetric.get_metric(hellinger_distance)
            assert self._nfeat > 1, (
                f"can't estimate hellinger distance with {self._nfeat} "
                "features, needed >1 features")

            try:
                self._distance_matr = pl.load(file_affinity)
                if self.verbose:
                    print(f"Loading affinity from {file_affinity}")
                return
            except FileNotFoundError:
                warnings.warn(
                    "Warning: Recomputing the Hellinger distance, this might take a while... "
                )
                self._metric = DistanceMetric.get_metric(hellinger_distance)
        else:
            raise ValueError(
                f" Affinity  '{affinity}' not recognized,"
                "  please use either  'euclidean' or 'hellinger' ")

        self._distance_matr = self._metric.pairwise(self._X)
        self._distance_matr = pl.ma.fix_invalid(self._distance_matr,
                                                fill_value=1.0).data
Example #25
File: beadmask.py Project: Jorges1000/TS
 def load_from_pylab(cls, pl, r0, r1):
     import pylab
     raw_beads = pylab.load(pl)
     span = r1 - r0
     table = [set() for i in range(span)]
     for r,c in raw_beads:
         table[int(r)-r0].add(int(c))
     return len(raw_beads), table
Example #26
def load_default(path, closure):
    from pylab import load, save
    try:
        return load(path)
    except IOError:
        obj = closure()
        save(path, obj)   # pylab.save takes the filename first, data second
        return obj
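The same cache-or-compute pattern with numpy's binary .npy format (np.load/np.save are the modern names that displaced the pylab pair):

import numpy as np

def load_default_npy(path, closure):
    try:
        return np.load(path)
    except (IOError, OSError):
        obj = closure()
        np.save(path, obj)   # filename first, data second
        return obj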
Example #27
File: example.py Project: AtomAleks/PyProp
def GetSparseMatrix(psi, config):
	#matrix = pylab.load("/home/raymond/sci/dev/Krotov/matrixElements/d130_50stk-matel")
	matrix = pylab.load("/home/raymond/sci/dev/qdot4d/CreateMatrixElements/lene/d130_50stk_newRay-matel")
	row = array(matrix[:,0], dtype=int) - 1
	col = array(matrix[:,1], dtype=int) - 1
	matelem = array(matrix[:,2], dtype=complex)

	return row, col, matelem
Example #28
def rejuice(d63,d63_2,d63_4,d63_8):
    #pinit = M.load('mill/s63/pm.pnl.dat')

    p1 = M.load('mill/s63/pm.pnl.dat')
    plog1 = M.load('mill/s63/plogm.pnl.dat')
    p2 = M.load('mill/s63r2/pm.pnl.dat')
    plog2 = M.load('mill/s63r2/plogm.pnl.dat')
    p4 = M.load('mill/s63r4/pm.pnl.dat')
    plog4 = M.load('mill/s63r4/plogm.pnl.dat')
    p8 = M.load('mill/s63r8/pm.pnl.dat')
    plog8 = M.load('mill/s63r8/plogm.pnl.dat')

    f63= N.exp(-N.mean(N.log(d63.flatten())))
    f63_2= N.exp(-N.mean(N.log(d63_2.flatten())))
    f63_4= N.exp(-N.mean(N.log(d63_4.flatten())))
    f63_8= N.exp(-N.mean(N.log(d63_8.flatten())))

    #M.loglog(p1[:,0],p1[:,1]/(plog1[:,1]*f63),'b--')
    #M.loglog(p2[:,0],p2[:,1]/(plog2[:,1]*f63_2),'g--')
    #M.loglog(p4[:,0],p4[:,1]/(plog4[:,1]*f63_4),'r--')
    #M.loglog(p8[:,0],p8[:,1]/(plog8[:,1]*f63_8),'y--')

    #xis = N.mean(d63.flatten()**2)
    #xis_2 = N.mean(d63_2.flatten()**2)
    #xis_4 = N.mean(d63_4.flatten()**2)
    #xis_8 = N.mean(d63_8.flatten()**2)

    xis = (1.+ 0.5*N.sqrt(N.var(d63.flatten())))
    xis_2 = (1.+0.5*N.sqrt(N.var(d63_2.flatten())))
    xis_4 = (1.+0.5*N.sqrt(N.var(d63_4.flatten())))
    xis_8 = 1.+0.5*N.sqrt(N.var(d63_8.flatten()))
    
    print 'exps:',f63,f63_2,f63_4,f63_8
    print 'xis:',xis, xis_2,xis_4,xis_8

    M.loglog(plog1[:,0],p1[:,1]/(plog1[:,1]*f63)*(1.+2.*xis**2),'b')
    M.loglog(plog2[:,0],p2[:,1]/(plog2[:,1]*f63_2)*(1.+2.*xis_2**2),'g')
    M.loglog(plog4[:,0],p4[:,1]/(plog4[:,1]*f63_4)*(1.+2.*xis_4**2),'r')
    M.loglog(plog8[:,0],p8[:,1]/(plog8[:,1]*f63_8)*(1.+2.*xis_8**2),'y')

    M.loglog(plog1[:,0],p1[:,1]/(plog1[:,1]*xis),'b')
    M.loglog(plog2[:,0],p2[:,1]/(plog2[:,1]*xis_2),'g')
    M.loglog(plog4[:,0],p4[:,1]/(plog4[:,1]*xis_4),'r')
    M.loglog(plog8[:,0],p8[:,1]/(plog8[:,1]*xis_8),'y')


    M.xlabel(r'$k\ [\rm{Mpc}/h]$',fontsize=20)
    M.ylabel(r'$P_\delta(k)/P_{\log (1+\delta)}(k)$',fontsize=20)

    bias1 = N.sum(p1[:5,1]*p1[:5,2])/N.sum(plog1[:5,1]*plog1[:5,2])
    bias2 = N.sum(p2[:5,1]*p2[:5,2])/N.sum(plog2[:5,1]*plog2[:5,2])
    bias4 = N.sum(p4[:5,1]*p4[:5,2])/N.sum(plog4[:5,1]*plog4[:5,2])
    bias8 = N.sum(p8[:5,1]*p8[:5,2])/N.sum(plog8[:5,1]*plog8[:5,2])

    print bias1,bias2,bias4,bias8#, N.log(bias1),N.log(bias2),N.log(bias4)       
    M.show()
Example #29
def test_mrf_EM():
    """EXAMPLE: EM learning on a MRF"""
    """Define MRF graph structure"""
    C = 0
    S = 1
    R = 2
    W = 3
    nodes = 4
    adj_mat = sparse.lil_matrix((nodes, nodes), dtype=int)
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    adj_mat[R, S] = 1
    """Define clique domains and node sizes"""
    ns = 2 * np.ones((1, nodes))
    clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]
    """Define cliques and potentials"""
    clqs = []
    clqs.append(cliques.discrete_clique(0, clq_doms[0], np.array([2])))
    clqs.append(cliques.discrete_clique(1, clq_doms[1], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(2, clq_doms[2], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(3, clq_doms[3], np.array([2, 2, 2])))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs)
    """
    Load the samples, and set one sample of one node to be unobserved. This
    should not affect the learnt parameters much, and demonstrates that
    the algorithm can handle unobserved samples.
    """
    samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
    samples[0][0] = []
    """Learn the parameters"""
    net.learn_params_EM(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/mrf_em_exact_max_sum_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Example #30
def test_bnet_EM():
    """EXAMPLE: EM learning on a BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))

    """Instantiate the model"""
    net = models.bnet(dag, ns, [])


    """
    Load the samples, and set one sample of one node to be unobserved. This
    should not affect the learnt parameters much, and demonstrates that
    the algorithm can handle unobserved samples.
    """
    samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
    samples[0][0] = []

    """Learn the parameters"""
    net.learn_params_EM(samples[:])
   
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)

    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))

    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/bnet_mle_exact_max_sum_res.txt'))

    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Example #31
   def OnButton(self, evt):
       '''Handle button click event'''
       # Get title of clicked button
       label = evt.GetEventObject().GetLabel()

       if label == "Get Atmospheric Factors": # Calculate
           try:
               sampleLat = float(self.lat.GetValue())
               sampleLon = float(self.lon.GetValue())

               NCEP = load(self.repo.GetClimateDataPath())
               # slice the stacked NCEP grid into its four fields, flipped so
               # latitude runs south to north (the unflipped slices that stood
               # here were dead code, immediately overwritten)
               Temperature = NCEP[73:0:-1,:];seaLevelPress = NCEP[146:73:-1,:];
               LapseRate = NCEP[219:146:-1,:];topo = NCEP[292:73:-1,:]

               lat = arange(90,-91,-2.5);lon = arange(0, 361,2.5)

               #localCoords is the site coordinates relative to the NCEP data coords
               #For interpolation the field is considered to bound 1 -> nx-1 , 1 -> ny-1
               xfac = len(lat) - 1
               yfac = len(lon) - 1
               localX = (max(lat) - sampleLat) * xfac / (max(lat) - min(lat)) + 1
               localY = sampleLon / max(lon) * yfac + 1
               localCoords = array([[ localX],[ localY ]])

               AnnualMeanSLP = ndimage.map_coordinates(seaLevelPress, localCoords)
               AnnualMeanTemp = ndimage.map_coordinates(Temperature, localCoords)
               AnnualMeanLapse = ndimage.map_coordinates(LapseRate, localCoords)

               sltempVal = "%3.1f" % (float(AnnualMeanTemp))
               slprecVal = "%3.1f" % (float(AnnualMeanSLP))
               LapseRate = "%3.1f" % (float(AnnualMeanLapse*-1))

                # Ignore empty calculation
               #if not compute.strip():
               if not sltempVal:
                   return

               # Calculate result
               # result = eval(compute)

               # Add to history
               self.sltemp.Insert(str(sltempVal), 0)
               self.slprec.Insert(str(slprecVal), 0)
               self.lapse.Insert(str(LapseRate), 0)
              
               # Show result
               #self.display.SetValue(str(result))
               self.sltemp.SetValue(str(sltempVal))
               self.slprec.SetValue(str(slprecVal))
               self.lapse.SetValue(str(LapseRate))
               #self.slprec.SetValue(str(slprecVal))
           except Exception, e:
               wx.LogError(str(e))
               return
Example #32
def makeplot(X,hjd,filename,xlo,xhi):

    # archive ephem
    T0 = 2452525.374416
    # august ephem
    #T0 = 2453964.330709
    P = 0.154525

    #   set some lower and upper time axis limits. set xlo to None for auto limits
    xlo = xlo
    xhi = xhi

    X = pl.load(filename)
    a = X[:,0][:-1]
    p = X[:,1][:-1]
    x = (X[:,2][:-1]+hjd-T0)/P - int(((X[:,2][:-1]+hjd-T0)/P)[0])
    #x = X[:,2][:-1]    
    siga = X[:,3][:-1]
    sigp = X[:,4][:-1]
    
    pl.figure(figsize=(6,4))
    pl.subplots_adjust(left=0.14,hspace=0.001)

    # plot the amplitude
    ax1 = pl.subplot(211)
    pl.errorbar(x,a,siga,fmt='ro')
    pl.xlabel('Orbital Phase')
    pl.ylabel('Amplitude')
    yt = pl.yticks()
    ax1.set_yticks(yt[0][1:-1])
    if xlo is not None:
        pl.xlim(xlo,xhi)
    else:
        pl.xlim(min(x)-0.02, max(x)+0.02)
    pl.grid()
   
    # plot the phase
    ax2 = pl.subplot(212)
    pl.errorbar(x,p,sigp,fmt='go')
    pl.xlabel('Orbital Phase')
    pl.ylabel('Phase (O-C)')
    yt = pl.yticks()
    ax2.set_yticks(yt[0][1:-1])
    if xlo is not None:
        pl.xlim(xlo,xhi)
    else:
        pl.xlim(min(x)-0.02, max(x)+0.02)
    pl.grid()
    #pl.ylim(-1.0,0.5)
    # remove the amplitude graph's x-axis
    pl.setp(ax1.get_xticklabels() , visible=False)
    
    #pl.savefig(filename[:-3]+'png')


    pl.show()
Example #33
def GetSparseMatrix(psi, config):
    #matrix = pylab.load("/home/raymond/sci/dev/Krotov/matrixElements/d130_50stk-matel")
    matrix = pylab.load(
        "/home/raymond/sci/dev/qdot4d/CreateMatrixElements/lene/d130_50stk_newRay-matel"
    )
    row = array(matrix[:, 0], dtype=int) - 1
    col = array(matrix[:, 1], dtype=int) - 1
    matelem = array(matrix[:, 2], dtype=complex)

    return row, col, matelem
Example #34
File: bosGUI.py Project: zwghit/CFCFD-NG
def histogram_u_v():

    from pylab import figure, hist, title, load, xlabel, ylabel
    from scipy import where, logical_or, flipud

    u = load('vecu.out.npy')
    v = load('vecv.out.npy')
    u = flipud(u)
    v = -flipud(v)
    figure()
    hist(u, 100)
    title('Frequency Distribution of horizontal displacements data')
    xlabel('Horizontal Displacement in Pixels')
    ylabel('Frequency')
    figure()
    hist(v, 100)
    title('Frequency Distribution of vertical displacements data')
    xlabel('Vertical Displacement in Pixels')
    ylabel('Frequency')
Example #35
	def load_csv(self,f):
		"""
		Loading data from a csv file. Uses pylab's load function. Seems much faster
		than scipy.io.read_array.
		"""
		varnm = f.readline().split(',')

		# what is the date variable's key if any, based on index passed as argument
		if self.date_key != []:
			rawdata = pylab.load(f, delimiter=',',converters={self.date_key[0]:pylab.datestr2num})			# don't need to 'skiprow' here
			self.date_key = varnm[self.date_key[0]]
		else:
			rawdata = pylab.load(f, delimiter=',')															# don't need to 'skiprow' here

		# making sure that the variable names contain no leading or trailing spaces
		varnm = [i.strip() for i in varnm]

		# transforming the data into a dictionary
		self.data = dict(zip(varnm,rawdata.T))
Example #36
File: Utilities2.py Project: qAp/LisaMapp
 def __init__(self,realrpath,realipath,imagrpath,imagipath,
              FreqOffset,Cadence):
     realr = pylab.load(realrpath)
     reali = pylab.load(realipath)
     imagr = pylab.load(imagrpath)
     imagi = pylab.load(imagipath)
     try:
         assert numpy.shape(realr)==numpy.shape(reali)==numpy.shape(imagr)==numpy.shape(imagi)
         self.realr = realr
         self.reali = reali
         self.imagr = imagr
         self.imagi = imagi
     except AssertionError:
         print 'The four arrays loaded need to have the same dimensions.'
     self.FreqOffset = FreqOffset
     self.Cadence = Cadence
     self.Length = numpy.shape(self.realr)[1]
     print 'Orf multiple moments files read successfully!'
     return
Example #37
File: ellipse.py Project: limu007/Charlay
def neboj_load(name,dlot=0):
    from pylab import load, plot, legend
    dut=load(name,skiprows=2)
    e1=dut[0][2::4]
    e2=dut[1][2::4]#.reshape(832,4)[:,0]
    f=dut[:,:2].transpose()
    g=dut[:,2:].reshape(dut.shape[0],832,4)
    if dlot:
        [plot(e1,q[:,0]) for q in g[::dlot]]  # plot every dlot-th spectrum ('e' was undefined here)
        legend(f[1][::dlot])
    return g,f,e1,e2
Example #38
File: Utilities2.py Project: qAp/LisaMapp
 def __init__(self, realrpath, realipath, imagrpath, imagipath, FreqOffset,
              Cadence):
     realr = pylab.load(realrpath)
     reali = pylab.load(realipath)
     imagr = pylab.load(imagrpath)
     imagi = pylab.load(imagipath)
     try:
         assert numpy.shape(realr) == numpy.shape(reali) == numpy.shape(
             imagr) == numpy.shape(imagi)
         self.realr = realr
         self.reali = reali
         self.imagr = imagr
         self.imagi = imagi
     except AssertionError:
         print 'The four arrays loaded need to have the same dimensions.'
     self.FreqOffset = FreqOffset
     self.Cadence = Cadence
     self.Length = numpy.shape(self.realr)[1]
     print 'Orf multiple moments files read successfully!'
     return
Example #39
def comp_condA_wishart():
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0) * g)
    p_list = [(i + 1) * (i + 1) + 1 for i in range(17)]
    ew = [0.0015, 0.0004, 0.0004]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    condA_list1 = []
    condA_list2 = []
    for p in p_list:
        A = assemble_wishart_matrix(B, ev1, ew, p)
        U, S, V = numpy.linalg.svd(A)
        condA = S[0] / S[-1]
        condA_list1.append(condA)

        A = assemble_wishart_matrix(B, ev2, ew, p)
        U, S, V = numpy.linalg.svd(A)
        condA = S[0] / S[-1]
        condA_list2.append(condA)

    return p_list, condA_list1, condA_list2
Example #40
def addToPlots( timeName ):
	fileName = timeName+'/turboPerformance.dat'
	time, head, TOmega, eff, Fx, Fy, Fz = pl.load( fileName ,skiprows=1,usecols=(0,1,2,3,4,5,6),unpack=True )
	
	pl.figure(1);
	pl.plot( time, head )  # fig1 active
	pl.figure(2); 
	pl.plot( time, TOmega )# fig2 active
	pl.figure(3); 
	pl.plot( time, eff )   # fig3 active	
	pl.figure(4); 
	pl.plot( time, Fx, time, Fy )# fig4 active     
Example #41
File: fit.py Project: rouckas/chemik
def fitfuncgen(p):
    saveconfig(p)
    import os
    os.system("./kyslik")

    from pylab import load
    result = load("result.dat")

    x = result[:,0]
    y = result[:,1:]
    from scipy.interpolate import interp1d
    return interp1d(x,y,axis=0, bounds_error=False)
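A usage sketch for the interpolator returned above (p0 is a hypothetical parameter vector):

f = fitfuncgen(p0)
y_new = f(1.0e-3)   # y-columns interpolated at x = 1e-3; with
                    # bounds_error=False, out-of-range points come back NaN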
Example #42
def comp_condA_wishart():
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0)*g)
    p_list = [(i+1)*(i+1)+1 for i in range(17)]
    ew = [0.0015,0.0004,0.0004]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    condA_list1 = []
    condA_list2 = []
    for p in p_list:
        A = assemble_wishart_matrix(B,ev1,ew,p)
        U,S,V = numpy.linalg.svd(A)
        condA = S[0]/S[-1]
        condA_list1.append(condA)

        A = assemble_wishart_matrix(B,ev2,ew,p)
        U,S,V = numpy.linalg.svd(A)
        condA = S[0]/S[-1]
        condA_list2.append(condA)

    return p_list,condA_list1,condA_list2
Example #43
def showpplog(prefix,color,fact=1.,xis=1.,xislog=1.,sumto=10,camb=0,cellsize=0):
    p = M.load(prefix+'/pm.pnl.dat')
    plog = M.load(prefix+'/plogm.pnl.dat')

    # all times xis
    sump = N.sum(M.load(prefix+'c11')[:sumto,1]*p[:sumto,2])
    c21xis = N.sum(M.load(prefix+'c21')[:sumto,1]*p[:sumto,2])/sump
    c22xis = N.sum(M.load(prefix+'c22')[:sumto,1]*p[:sumto,2])/sump
    c31xis = N.sum(M.load(prefix+'c31')[:sumto,1]*p[:sumto,2])/sump
    
    #bias = N.sum(p[:sumto,1]*p[:sumto,2])/N.sum(plog[:sumto,1]*plog[:sumto,2])
    bias = N.sum((p[:sumto,1]/plog[:sumto,1]) * p[:sumto,2])/N.sum(p[:sumto,2])
    biaserror = N.std(p[:sumto,1]/plog[:sumto,1])
    simpleapprox = 1./(1.-0.44*xis)

    c21 = camb.c21(cellsize)
    c22 = camb.c22(cellsize)
    c31 = camb.c31(cellsize)
    s3 = camb.s3(cellsize)
    #print cellsize,c21, c21/xis
    approx = 1./(1+xis*(2.-c21))
    approx2 = 1./(1+xis*(2-c21)+xis**2*(7-2*s3-4*c21 + 2.*c31/3. + c22/4.))
    #print bias,simpleapprox,approx,approx2
    print bias,biaserror

    M.loglog([cellsize],[simpleapprox-1],'yo')
    M.loglog([cellsize],[approx-1],'rp')
    M.loglog([cellsize],[approx2-1.],'bh')
    M.loglog([cellsize],[fact-1.],'gD')
    M.loglog([cellsize],[bias-1],'k.')
Example #44
File: bosGUI.py Project: zwghit/CFCFD-NG
def showColourSchlieren():

    global colourlevelVar
    from scipy import arctan2, r_, pi, sqrt, zeros
    from pylab import contourf, quiver, load, figure, show, imshow
    from colorsys import hsv_to_rgb

    x = load('vecx.out.npy')
    y = load('vecy.out.npy')
    u = load('vecu.out.npy')
    v = load('vecv.out.npy')

    angle = arctan2(v, u)
    colourlevel = float(colourlevelVar.get())
    #convert angle to go from 0 to 2*pi.
    height, width = x.shape
    for i in r_[0:height - 1]:
        for j in r_[0:width - 1]:
            if angle[i, j] < 0:
                angle[i, j] = 2 * pi + angle[i, j]

    magnitude = sqrt(u**2 + v**2)
    max_magnitude = magnitude.max()

    C = zeros((int(height), int(width), int(3)), dtype=float)
    for i in r_[0:height - 1]:
        for j in r_[1:width - 1]:
            h = angle[i, j] / 2 / pi
            s = colourlevel * magnitude[i, j] / max_magnitude
            if s > 1:
                s = 1
            vv = 1
            h, s, vv = hsv_to_rgb(h, s, vv)
            C[i, j, 0] = h
            C[i, j, 1] = s
            C[i, j, 2] = vv
    figure()
    imshow(C)
    show()
    return
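The per-pixel HSV loop above can be vectorized with matplotlib.colors.hsv_to_rgb; a sketch assuming the same angle, magnitude, max_magnitude and colourlevel values:

import numpy as np
from matplotlib.colors import hsv_to_rgb

h = angle / (2 * np.pi)                                     # hue from flow direction
s = np.clip(colourlevel * magnitude / max_magnitude, 0, 1)  # saturation from speed
v = np.ones_like(h)                                         # full value everywhere
C = hsv_to_rgb(np.dstack((h, s, v)))                        # (height, width, 3) RGB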
Example #45
def FitCubicStrain(datafilename, straintype, cellvolume):
	"""Fits the strain data from datafile to extract corresponding elastic constants.
	The straintype is a string corresponding to standard cubic strain types."""
    
	import CubicStandardStrainTensors as cubic
	from pylab import load
	try:
		data = load(datafilename)
	except IOError:
		print "Could not load strain data."
		raise

	tensor = cubic.__dict__[straintype]
	elastConst = FitCubicElasticConstant(data, tensor, cellvolume)
	return elastConst
Example #46
def comp_condA_spike():
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0) * g)
    sigma_list = range(10, 110, 10)
    ew = [1, 0, 0]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    tessellation = pylab.load('vertices_iso_3.txt')
    condA_list1 = []
    condA_list2 = []
    for sigma in sigma_list:

        A = assemble_spike_matrix(B, ev1, ew, sigma, tessellation)
        U, S, V = numpy.linalg.svd(A)
        condA = S[0] / S[-1]
        condA_list1.append(condA)

        A = assemble_spike_matrix(B, ev2, ew, sigma, tessellation)
        U, S, V = numpy.linalg.svd(A)
        condA = S[0] / S[-1]
        condA_list2.append(condA)

    return sigma_list, condA_list1, condA_list2
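Aside: the ratio S[0]/S[-1] computed in these loops is exactly the default 2-norm condition number, so each SVD block can be shortened to:

condA = numpy.linalg.cond(A)   # == S[0] / S[-1] from the SVD above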
Example #47
def FitCubicStrain(datafilename, straintype, cellvolume):
    """Fits the strain data from datafile to extract corresponding elastic constants.
	The straintype is a string corresponding to standard cubic strain types."""

    import CubicStandardStrainTensors as cubic
    from pylab import load
    try:
        data = load(datafilename)
    except IOError:
        print "Could not load strain data."
        raise

    tensor = cubic.__dict__[straintype]
    elastConst = FitCubicElasticConstant(data, tensor, cellvolume)
    return elastConst
Example #48
def comp_condA_spike():
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0)*g)
    sigma_list = range(10,110,10)
    ew = [1,0,0]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    tessellation = pylab.load('vertices_iso_3.txt')
    condA_list1 = []
    condA_list2 = []
    for sigma in sigma_list:

        A = assemble_spike_matrix(B,ev1,ew,sigma,tessellation)
        U,S,V = numpy.linalg.svd(A)
        condA = S[0]/S[-1]
        condA_list1.append(condA)

        A = assemble_spike_matrix(B,ev2,ew,sigma,tessellation)
        U,S,V = numpy.linalg.svd(A)
        condA = S[0]/S[-1]
        condA_list2.append(condA)

    return sigma_list,condA_list1,condA_list2
Example #49
def test_bnet_EM():
    """EXAMPLE: EM learning on a BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))
    """Instantiate the model"""
    net = models.bnet(dag, ns, [])
    """
    Load the samples, and set one sample of one node to be unobserved. This
    should not affect the learnt parameters much, and demonstrates that
    the algorithm can handle unobserved samples.
    """
    samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
    samples[0][0] = []
    """Learn the parameters"""
    net.learn_params_EM(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/bnet_mle_exact_max_sum_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Example #50
    def load_csv(self, f):
        """
		Loading data from a csv file. Uses pylab's load function. Seems much faster
		than scipy.io.read_array.
		"""
        varnm = f.readline().split(',')

        # what is the date variable's key if any, based on index passed as argument
        if self.date_key != []:
            rawdata = pylab.load(
                f,
                delimiter=',',
                converters={self.date_key[0]:
                            pylab.datestr2num})  # don't need to 'skiprow' here
            self.date_key = varnm[self.date_key[0]]
        else:
            rawdata = pylab.load(f,
                                 delimiter=',')  # don't need to 'skiprow' here

        # making sure that the variable names contain no leading or trailing spaces
        varnm = [i.strip() for i in varnm]

        # transforming the data into a dictionary
        self.data = dict(zip(varnm, rawdata.T))
Example #51
def addToPlots(timeName):
    fileName = timeName + '/turboPerformance.dat'
    time, head, TOmega, eff, Fx, Fy, Fz = pl.load(fileName,
                                                  skiprows=1,
                                                  usecols=(0, 1, 2, 3, 4, 5,
                                                           6),
                                                  unpack=True)

    pl.figure(1)
    pl.plot(time, head)  # fig1 active
    pl.figure(2)
    pl.plot(time, TOmega)  # fig2 active
    pl.figure(3)
    pl.plot(time, eff)  # fig3 active
    pl.figure(4)
    pl.plot(time, Fx, time, Fy)  # fig4 active
Example #52
def test_mrf_exact_sum_product():
    """EXAMPLE: Junction tree sum-product on MRF"""
    """Define MRF graph structure"""
    C = 0
    S = 1
    R = 2
    W = 3
    nodes = 4
    adj_mat = sparse.lil_matrix((nodes, nodes), dtype=int)
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    adj_mat[R, S] = 1
    """Define clique domains and node sizes"""
    ns = 2 * np.ones((1, nodes))
    clq_doms = [[0, 1, 2], [1, 2, 3]]
    """Define cliques and potentials"""
    clqs = []
    T = np.zeros((2, 2, 2))
    T[:, :, 0] = np.array([[0.2, 0.2], [0.09, 0.01]])
    T[:, :, 1] = np.array([[0.05, 0.05], [0.36, 0.04]])
    clqs.append(cliques.discrete_clique(0, clq_doms[0], np.array([2, 2, 2]),
                                        T))
    T[:, :, 0] = np.array([[1, 0.1], [0.1, 0.01]])
    T[:, :, 1] = np.array([[0, 0.9], [0.9, 0.99]])
    clqs.append(cliques.discrete_clique(1, clq_doms[1], np.array([2, 2, 2]),
                                        T))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs)
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    results = []
    for evidence in evidences:
        net.sum_product(evidence)
        result = []
        result.append(np.max(net.marginal_nodes([0]).T))
        result.append(np.max(net.marginal_nodes([1]).T))
        result.append(np.max(net.marginal_nodes([2]).T))
        result.append(np.max(net.marginal_nodes([3]).T))
        results.append(result)

    results = np.array(results)
    """Get the expected results"""
    exp_results = np.array(pylab.load('./Data/mrf_exact_sum_product_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(results, exp_results)
Example #53
def Load(path):
    """
    Read a .txt file 

    :Parameters:
     - `path`: path of the .txt file

    :Types:
     - `path`: string

    :returns: the file
    :returntype: array
    """

    f = pylab.load(path)

    return f
Example #54
def readBinningFile(binningFile):
    """
    @brief reads a binning file.
    Searches for the file in Flipper params dir or the working dir;
    and fails if not found.

    @return binLower
    @return binUpper
    @return binCenter
    """
    
    if not (os.path.exists(binningFile)):
        binningFile = os.environ['FLIPPER_DIR']+os.path.sep+'params'+os.path.sep+binningFile
        if not (os.path.exists(binningFile)):
            raise IOError, 'Binning file %s not found'%binningFile
        
    binLower,binUpper,binCenter= pylab.load(binningFile,skiprows=1,unpack=True)
    return binLower,binUpper,binCenter
Example #55
def test_bnet_approx_sum_product():
    """EXAMPLE: Loopy belief sum-product on BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))
    """Instantiate the CPD for each node in the network"""
    node_cpds = [[], [], [], []]
    CPT = np.array([0.5, 0.5])
    node_cpds[C] = cpds.TabularCPD(CPT)
    CPT = np.array([[0.8, 0.2], [0.2, 0.8]])
    node_cpds[R] = cpds.TabularCPD(CPT)
    CPT = np.array([[0.5, 0.5], [0.9, 0.1]])
    node_cpds[S] = cpds.TabularCPD(CPT)
    CPT = np.array([[[1, 0], [0.1, 0.9]], [[0.1, 0.9], [0.01, 0.99]]])
    node_cpds[W] = cpds.TabularCPD(CPT)
    """Instantiate the object"""
    net = models.bnet(dag, ns, node_cpds)
    net.init_inference_engine(exact=False)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    results = []
    for evidence in evidences:
        net.sum_product(evidence)
        result = []
        result.append(np.max(net.marginal_nodes([0]).T))
        result.append(np.max(net.marginal_nodes([1]).T))
        result.append(np.max(net.marginal_nodes([2]).T))
        result.append(np.max(net.marginal_nodes([3]).T))
        results.append(result)

    results = np.array(results)
    """Get the expected results"""
    exp_results = np.array(
        pylab.load('./Data/bnet_approx_sum_product_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(results, exp_results)
Example #56
def PlotFile(filename, opt=None, lwidth=1.5):
    """
    Plot the archive filename
    without the command show()
    """
    f = pylab.load(filename)

    if numpy.size(numpy.shape(f)) == 1:  # uma dimensao o arquivo
        if opt:
            pylab.plot(range(0, len(f), 1), f, opt, linewidth=lwidth)
        else:
            pylab.plot(range(0, len(f), 1), f, linewidth=lwidth)

    if numpy.size(numpy.shape(f)) == 2:  # duas dimensoes o arquivo
        if opt:
            pylab.plot(f[:, 0], f[:, 1], opt, linewidth=lwidth)
        else:
            pylab.plot(f[:, 0], f[:, 1], linewidth=lwidth)

    return f
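A usage sketch with a hypothetical two-column data file:

f = PlotFile('spectrum.dat', opt='k--', lwidth=1.0)   # hypothetical file
pylab.show()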
Example #57
 def graph_model(self):
     """
     graph_model: plots the graph produced & displays to screen
     """
     try:
         if os.stat(
                 internalFilesDir + internalThreeDModel
         ).st_size > 0:  # checks for empty file; if so, OSError thrown
             with open(
                     internalFilesDir + internalThreeDModel, 'rb'
             ) as f:  # handles open, close, and errors with opening
                 try:  # If file doesn't load correctly, IOError is thrown.
                     self.ThreeDModel = pl.load(f)
                     plot_three_d_model(self.ThreeDModel,
                                        self.Parameters.du, figuresDir,
                                        self.Parameters, self.TestModeOn)
                 except IOError:
                     print "Error reading", internalThreeDModel
     except OSError:
         print "Try running 'make 3D' first."
Example #58
def prep_data(filename):
    #    Data=pylab.load(r'c:\resolution_stuff\1p4K.iexy')
    Data = pylab.load(filename)
    xt = Data[:, 2]
    yt = Data[:, 3]
    zorigt = Data[:, 0]
    x = xt[zorigt > 0.0]   # 1-D arrays: plain boolean mask, no leading ':' axis
    y = yt[zorigt > 0.0]
    z = zorigt[zorigt > 0.0]
    #    zorig=ma.array(zorigt)
    print 'reached'
    threshold = 0.0
    #    print zorigt < threshold
    #    print N.isnan(zorigt)
    #    z = ma.masked_where(zorigt < threshold , zorigt)
    print 'where masked ', z.shape
    #should be commented out--just for testing
    ##    x = pylab.randn(Nu)/aspect
    ##    y = pylab.randn(Nu)
    ##    z = pylab.rand(Nu)
    ##    print x.shape
    ##    print y.shape
    # Grid
    xi, yi = N.mgrid[x.min():x.max():.05, y.min():y.max():.05]  # data-driven grid
    # triangulate data
    tri = D.Triangulation(x, y)
    print 'before interpolator'
    # interpolate data
    interp = tri.nn_interpolator(z)
    print 'interpolator reached'
    zi = interp(xi, yi)
    # or, all in one line
    #    zi = Triangulation(x,y).nn_interpolator(z)(xi,yi)
    #    return x,y,z
    if interpolate_on == False:
        xi = x
        yi = y
        zi = z

    return xi, yi, zi