Example #1
File: genetic.py Project: myw/dataiap
def xover(chrom,N,p):
    """Single point crossover with probability N,precision p
    """
    N = round(chrom.shape[0]*N)
    index1 = scipy.arange(chrom.shape[0])
    index2 = scipy.unique(scipy.around(scipy.rand(chrom.shape[0],)*chrom.shape[0]))[0:chrom.shape[0]//2]
    sel1,sel2 = [],[]
    for i in range(len(index1)):
        if index1[i] not in index2:
            sel1.append(index1[i])
        else:
            sel2.append(index1[i])
    n_pairs = min([int(round(len(sel1)*N)), int(round(len(sel2)*N))])
    select1 = sel1[0:n_pairs]
    select2 = sel2[0:n_pairs]
    
    # set xover points
    xoverpnt = scipy.around(scipy.rand(len(select1),)*(chrom.shape[1]-1))
    
    # perform xover
    nchrom = copy.deepcopy(chrom)
    for i in range(len(select1)):
        try:
            slice1 = chrom[select1[i],0:int(xoverpnt[i])]
            slice2 = chrom[select2[i],0:int(xoverpnt[i])]
            nchrom[select2[i],0:int(xoverpnt[i])] = slice1
            nchrom[select1[i],0:int(xoverpnt[i])] = slice2
        except Exception:
            # leave this pair unchanged if the slice exchange fails
            pass
    
    return nchrom
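A minimal driver for the operator above, as a hypothetical sketch (the 20x10 binary population and the 0.8 crossover fraction are illustrative assumptions, not from the myw/dataiap project):

import copy
import scipy  # the snippet relies on scipy's legacy top-level rand/arange/unique

pop = (scipy.rand(20, 10) > 0.5).astype(int)  # 20 random 10-bit chromosomes
children = xover(pop, 0.8, 1e-3)              # recombine ~80% of the population
assert children.shape == pop.shape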
Example #2
 def __init__(self, basef, 
              translate=True, 
              rotate=False, 
              conditioning=None, 
              asymmetry=None,
              oscillate=False, 
              penalize=None,
              ):
     FunctionEnvironment.__init__(self, basef.xdim, basef.xopt)
     self.desiredValue = basef.desiredValue            
     self.toBeMinimized = basef.toBeMinimized
     
     if translate:            
         self.xopt = (rand(self.xdim) - 0.5) * 9.8
         
     self._diags = eye(self.xdim)            
     self._R = eye(self.xdim)            
     self._Q = eye(self.xdim)            
     
     if conditioning is not None:
         self._diags = generateDiags(conditioning, self.xdim)
     if rotate:
         self._R = orth(rand(basef.xdim, basef.xdim))        
         if conditioning:
             self._Q = orth(rand(basef.xdim, basef.xdim))
                     
     tmp = lambda x: dot(self._Q, dot(self._diags, dot(self._R, x-self.xopt)))
     if asymmetry is not None:
         tmp2 = tmp
         tmp = lambda x: asymmetrify(tmp2(x), asymmetry)
     if oscillate:
         tmp3 = tmp
         tmp = lambda x: oscillatify(tmp3(x))
     
     self.f = lambda x: basef.f(tmp(x))
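A side note on the tmp2/tmp3 dance above: rebinding tmp to a lambda that also calls tmp would recurse forever, so each wrapper first captures the previous callable under a fresh name. A self-contained sketch of the same idiom:

f = lambda x: x + 1
g = f                   # capture the old callable before rebinding f
f = lambda x: 2 * g(x)  # f(3) == 8; referring to f itself here would recurse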
Example #3
    def test_blackbox(self):
        for A, b in self.cases:
            x = solve(A, b, verb=False, maxiter=A.shape[0])
            assert(norm(b - A*x)/norm(b - A*rand(b.shape[0],)) < 1e-4)

        # Special tests
        # (1) Make sure BSR format is preserved, and B is multiple vecs
        A, b = self.cases[-1]
        (x, ml) = solve(A, b, return_solver=True, verb=False)
        assert(ml.levels[0].B.shape[1] == 3)
        assert(ml.levels[0].A.format == 'bsr')

        # (2) Run with solver and make sure that solution is still good
        x = solve(A, b, existing_solver=ml, verb=False)
        assert(norm(b - A*x)/norm(b - A*rand(b.shape[0],)) < 1e-4)

        # (3) Convert to CSR, make sure B is a single vector
        (x, ml) = solve(A.tocsr(), b, return_solver=True, verb=False)
        assert(ml.levels[0].B.shape[1] == 1)
        assert(ml.levels[0].A.format == 'csr')

        # (4) Run with x0, maxiter and tol
        x = solve(A, b, existing_solver=ml, x0=zeros_like(b), tol=1e-8,
                  maxiter=300, verb=False)
        assert(norm(b - A*x)/norm(b - A*rand(b.shape[0],)) < 1e-7)

        # (5) Run nonsymmetric example, make sure BH isn't None
        A, b = self.cases[2]
        (x, ml) = solve(A, b, return_solver=True, verb=False,
                        maxiter=A.shape[0])
        assert(ml.levels[0].BH is not None)
Example #4
File: depso.py Project: mmssouza/cbir
 def run(self):
  # For each particle
  for i in scipy.arange(self.ns):
   # evaluate the particle's current position
   self.fit[i],aux = self.avalia_aptidao(self.pop[i])
   self.pop[i] = aux.copy()
   # re-evaluate the particle's best position
   self.bfp_fitness[i],aux = self.avalia_aptidao(self.bfp[i])
   self.bfp[i] = aux.copy()
   if self.debug:
    print "self.fit[{0}] = {1} bfp_fitness = {2}".format(i,self.fit[i],self.bfp_fitness[i])
   if self.bfp_fitness[i] < self.fit[i]:
    self.bfp[i] = self.pop[i].copy()
    self.bfp_fitness[i] = self.fit[i].copy()
  
  # Update the global best position with the best of all personal bests
  idx = self.bfp_fitness.argmax()
  self.bfg = self.bfp[idx].copy()
 
  for i in scipy.arange(self.ns):
   # Update velocity
   self.v[i] = self.w*self.v[i]
   self.v[i] = self.v[i] + self.c1*scipy.rand()*( self.bfp[i] - self.pop[i])
   self.v[i] = self.v[i] + self.c2*scipy.rand()*(self.bfg - self.pop[i])
   # Update position
   self.pop[i] = self.pop[i] + self.v[i]
   # Evaluate the fitness of the new position
   self.fit[i],aux = self.avalia_aptidao(self.pop[i])
   self.pop[i] = aux.copy()
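For reference, the per-particle update implemented above (and again in Examples #18 and #21) is the standard PSO rule, with inertia weight w, acceleration coefficients c1 and c2, and fresh uniform draws r1, r2 on every update:

    v_i <- w*v_i + c1*r1*(bfp_i - x_i) + c2*r2*(bfg - x_i)
    x_i <- x_i + v_i

where bfp_i is particle i's best known position and bfg is the best position found by the whole swarm.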
Example #5
    def test_DAD(self):
        A = poisson((50, 50), format="csr")

        x = rand(A.shape[0])
        b = rand(A.shape[0])

        D = diag_sparse(1.0 / sqrt(10 ** (12 * rand(A.shape[0]) - 6))).tocsr()
        D_inv = diag_sparse(1.0 / D.data)

        DAD = D * A * D

        B = ones((A.shape[0], 1))

        # TODO force 2 level method and check that result is the same
        kwargs = {"max_coarse": 1, "max_levels": 2, "coarse_solver": "splu"}

        sa = smoothed_aggregation_solver(DAD, D_inv * B, **kwargs)

        residuals = []
        x_sol = sa.solve(b, x0=x, maxiter=10, tol=1e-12, residuals=residuals)

        avg_convergence_ratio = (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))

        # print "Diagonal Scaling Test:   %1.3e,  %1.3e" %
        # (avg_convergence_ratio, 0.25)
        assert avg_convergence_ratio < 0.25
Example #6
    def test_symmetry(self):
        # Test that a basic V-cycle yields a symmetric linear operator.  Common
        # reasons for failure are problems with using the same rho for the
        # pres/post-smoothers and using the same block_D_inv for
        # pre/post-smoothers.

        n = 500
        A = poisson((n,), format="csr")
        smoothers = [
            ("gauss_seidel", {"sweep": "symmetric"}),
            ("schwarz", {"sweep": "symmetric"}),
            ("block_gauss_seidel", {"sweep": "symmetric"}),
            "jacobi",
            "block_jacobi",
        ]
        rng = arange(1, n + 1, dtype="float").reshape(-1, 1)
        Bs = [ones((n, 1)), hstack((ones((n, 1)), rng))]

        for smoother in smoothers:
            for B in Bs:
                ml = smoothed_aggregation_solver(A, B, max_coarse=10, presmoother=smoother, postsmoother=smoother)
                P = ml.aspreconditioner()
                x = rand(n)
                y = rand(n)
                assert_approx_equal(dot(P * x, y), dot(x, P * y))
Example #7
    def test_poisson(self):
        cases = []

        cases.append((500,))
        cases.append((250, 250))
        cases.append((25, 25, 25))

        for case in cases:
            A = poisson(case, format='csr')

            np.random.seed(0)  # make tests repeatable

            x = sp.rand(A.shape[0])
            b = A*sp.rand(A.shape[0])  # zeros_like(x)

            ml = ruge_stuben_solver(A, max_coarse=50)

            res = []
            x_sol = ml.solve(b, x0=x, maxiter=20, tol=1e-12,
                             residuals=res)
            del x_sol

            avg_convergence_ratio = (res[-1]/res[0])**(1.0/len(res))

            assert(avg_convergence_ratio < 0.20)
Example #8
    def test_symmetry(self):
        # Test that a basic V-cycle yields a symmetric linear operator.  Common
        # reasons for failure are problems with using the same rho for the
        # pres/post-smoothers and using the same block_D_inv for
        # pre/post-smoothers.

        n = 500
        A = poisson((n,), format='csr')
        smoothers = [('gauss_seidel', {'sweep': 'symmetric'}),
                     ('schwarz', {'sweep': 'symmetric'}),
                     ('block_gauss_seidel', {'sweep': 'symmetric'}),
                     'jacobi', 'block_jacobi']
        rng = np.arange(1, n + 1, dtype='float').reshape(-1, 1)
        Bs = [np.ones((n, 1)), sp.hstack((np.ones((n, 1)), rng))]

        # TODO:
        # why does python 3 require significant=6 while python 2 passes
        # why does python 3 yield a different dot() below than python 2
        # only for: ('gauss_seidel', {'sweep': 'symmetric'})
        for smoother in smoothers:
            for B in Bs:
                ml = smoothed_aggregation_solver(A, B, max_coarse=10,
                                                 presmoother=smoother,
                                                 postsmoother=smoother)
                P = ml.aspreconditioner()
                np.random.seed(0)
                x = sp.rand(n,)
                y = sp.rand(n,)
                out = (np.dot(P * x, y), np.dot(x, P * y))
                # print("smoother = %s %g %g" % (smoother, out[0], out[1]))
                assert_approx_equal(out[0], out[1])
Example #9
 def test_improve_candidates(self):
     # test improve_candidates for the Poisson problem and elasticity, where
     # rho_scale is the amount that each successive improve_candidates option
     # should improve convergence over the previous improve_candidates option.
     improve_candidates_list = [None, [('block_gauss_seidel', {'iterations' : 4, 'sweep':'symmetric'})] ]
     # make tests repeatable
     numpy.random.seed(0) 
     
     cases = []
     A_elas,B_elas = linear_elasticity( (60,60), format='bsr')
     #                Matrix                              Candidates    rho_scale
     cases.append( (poisson( (61,61),  format='csr'), ones((61*61,1)), 0.9 ) )
     cases.append( (A_elas,                           B_elas,       0.9 ) )
     for (A,B,rho_scale) in cases:
         last_rho = -1.0
         x0 = rand(A.shape[0],1) 
         b = rand(A.shape[0],1)
         for improve_candidates in improve_candidates_list:
             ml = smoothed_aggregation_solver(A, B, max_coarse=10, improve_candidates=improve_candidates)
             residuals=[]
             x_sol = ml.solve(b,x0=x0,maxiter=20,tol=1e-10, residuals=residuals)
             rho = (residuals[-1]/residuals[0])**(1.0/len(residuals))
             if last_rho == -1.0:
                 last_rho = rho
             else:
                 # each successive improve_candidates option should be an improvement on the previous
                 # print "\nimprove_candidates Test: %1.3e, %1.3e, %d\n"%(rho,rho_scale*last_rho,A.shape[0])
                 assert(rho < rho_scale*last_rho)
                 last_rho = rho
Example #10
def driver():
    """ 
    Driver function for testing Laguerre polynomials
    """

    from scipy import rand, randn
    from numpy import ceil

    all_tests = ValidationContainer()

    """ Laguerre case """
    N = int(ceil(50*rand()))
    all_tests.extend(quadrature_test(N,alpha=0.))
    all_tests.extend(evaluation_test(N,alpha=0.))

    """ Random generalized case 1 """
    N = int(ceil(50*rand()))
    alpha = 10*rand()
    all_tests.extend(quadrature_test(N, alpha=alpha))
    all_tests.extend(evaluation_test(N, alpha=alpha))

    """ Random generalized case 2 """
    N = int(ceil(50*rand()))
    alpha = 10*rand()
    all_tests.extend(quadrature_test(N, alpha=alpha))
    all_tests.extend(evaluation_test(N, alpha=alpha))

    return all_tests
Example #11
    def test_poisson(self):
        cases = []
        
        # perturbed Laplacian
        A = poisson( (50,50), format='csr' )
        Ai = A.copy(); Ai.data = Ai.data + 1e-5j*rand(Ai.nnz)
        cases.append((Ai, 0.25))
        
        # imaginary Laplacian
        Ai = 1.0j*A
        cases.append((Ai, 0.25))
        
        ## JBS:  Not sure if this is a valid test case
        ## imaginary shift 
        #Ai = A + 1.1j*scipy.sparse.eye(A.shape[0], A.shape[1])
        #cases.append((Ai,0.8))

        for A,rratio in cases:
            [asa,work] = adaptive_sa_solver(A, num_candidates = 1, symmetry='symmetric')
            #sa  = smoothed_aggregation_solver(A, B = ones((A.shape[0],1)) )
    
            b = zeros((A.shape[0],))
            x0 = rand(A.shape[0],) + 1.0j*rand(A.shape[0],)
    
            residuals0 = []
            residuals1 = []

            sol0 = asa.solve(b, x0=x0, maxiter=20, tol=1e-10, residuals=residuals0)
            #sol1 =  sa.solve(b, x0=x0, maxiter=20, tol=1e-10, residuals=residuals1)
           
            conv_asa = (residuals0[-1]/residuals0[0])**(1.0/len(residuals0))
            #conv_sa  = (residuals1[-1]/residuals1[0])**(1.0/len(residuals1))
            
            assert( conv_asa < rratio )
Example #12
 def __new__(cls, nbinaries=int(1e6)):
     arr = sp.ones(nbinaries, dtype=[(name, 'f8') for name in ['period', 'mass_ratio', 'eccentricity', 'phase', 'theta', 'inclination']])
     arr['eccentricity'] = 0.
     arr['phase'] = sp.rand(nbinaries)
     arr['theta'] = sp.rand(nbinaries) * 2 * sp.pi
     arr['inclination'] = sp.arccos(sp.rand(nbinaries) * 2. - 1.)
     return arr.view(OrbitalParameters)
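A quick sanity check on the inclination sampling above: drawing cos(i) uniformly on [-1, 1] via arccos(2u - 1) is what makes the orientations isotropic. A small self-contained sketch (the sample size is arbitrary):

import scipy as sp

incl = sp.arccos(sp.rand(100000) * 2. - 1.)
# for an isotropic distribution, cos(incl) is uniform on [-1, 1],
# so its sample mean should sit near zero
assert abs(sp.cos(incl).mean()) < 0.01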
Example #13
    def __init__(self, type='random', pars=parameters()):

        if type == 'random':
            ee = (rand(pars['Ne'], pars['Ne']) < pars['p_ee'])
            ei = (rand(pars['Ne'], pars['Ni']) < pars['p_ei'])
            ii = (rand(pars['Ni'], pars['Ni']) < pars['p_ii'])
            ie = (rand(pars['Ni'], pars['Ne']) < pars['p_ie'])
            self.A = vstack((hstack((ee, ei)), hstack((ie, ii))))
            self.A[range(pars['Ne'] + pars['Ni']), range(pars['Ne'] + pars['Ni'])] = 0  # remove selfloops

        elif type == 'none':
            self.A = zeros((pars['N'], pars['N']))  # no connectivity

        elif type == 'uni_torus':  # torus with uniform connectivity profile
            self.A = zeros((pars['N'], pars['N']))

            # construct matrix of pairwise distance
            distMat = zeros((pars['N'], pars['N']))
            for n1 in range(pars['N']):
                coord1 = linear2grid(n1, pars['N_col'])
                for n2 in arange(n1 + 1, pars['N']):
                    coord2 = linear2grid(n2, pars['N_col']) - coord1  # this sets neuron n1 to the origin
                    distMat[n1, n2] = toric_length(coord2, pars['N_row'], pars['N_col'])
            distMat = distMat + distMat.transpose()

            # construct adjacency matrix
            for n1 in range(pars['N']):
                neighbor_ids = nonzero(distMat[:, n1] < pars['sigma_con'])[0]
                random.shuffle(neighbor_ids)
                idx = neighbor_ids[0:min([pars['ncon'], len(neighbor_ids)])]
                self.A[idx, n1] = 1
        else:
            print "type " + type + " not yet implemented"
Example #14
 def setUp(self):
     SP.random.seed(1)
     nr = 3
     nc = 5
     n_dim1 = 8
     n_dim2 = 12
     # truncation of soft kronecker
     self.n_trunk = 10
     Xr = SP.rand(nr, n_dim1)
     Xc = SP.rand(nc, n_dim2)
     Cr = dlimix.CCovSqexpARD(n_dim1)
     Cr.setX(Xr)
     Cc = dlimix.CCovLinearARD(n_dim2)
     Cc.setX(Xc)
     self.C = dlimix.CKroneckerCF()
     self.C.setRowCovariance(Cr)
     self.C.setColCovariance(Cc)
     # set kronecker index
     self.kronecker_index = dlimix.CKroneckerCF.createKroneckerIndex(nc, nr)
     self.n = self.C.Kdim()
     self.n_dim = self.C.getNumberDimensions()
     self.name = "CKroneckerCF"
     self.n_params = self.C.getNumberParams()
     params = SP.exp(SP.randn(self.n_params))
     self.C.setParams(params)
Example #15
def initial_cond(coords, mass, dipole, temp, F):
    cm_coords = coords - tile(center_of_mass(coords, mass), (coords.shape[0], 1))

    print "computing inertia tensor and principal axes of inertia"

    mol_I, mol_Ix = eig(inertia_tensor(cm_coords, mass))
    mol_I.sort()

    print "principal moments of inertia are: ", mol_I

    # compute the ratio of the dipole energy to the
    # rotational energy

    print "x = (mu*F / kB*T_R) = ", norm(dipole) * F / kB_au / temp

    # random initial angular velocity vector
    # magnitude set so that 0.5 * I * w**2.0 = kT
    w_mag = sqrt(2.0 * kB_au * temp / mol_I.mean())
    w0 = 2.0 * rand(3) - 1.0
    w0 = w0 / norm(w0) * w_mag

    # random initial orientation / random unit quaternion
    q0 = 2.0 * rand(4) - 1.0
    q0 = q0 / norm(q0)

    return q0, w0
Example #16
    def test_symmetry(self):
        # Test that a basic V-cycle yields a symmetric linear operator.  Common
        # reasons for failure are problems with using the same rho for the
        # pres/post-smoothers and using the same block_D_inv for
        # pre/post-smoothers.

        n = 500
        A = poisson((n,), format='csr')
        smoothers = [('gauss_seidel', {'sweep': 'symmetric'}),
                     ('schwarz', {'sweep': 'symmetric'}),
                     ('block_gauss_seidel', {'sweep': 'symmetric'}),
                     'jacobi', 'block_jacobi']
        Bs = [ones((n, 1)),
              hstack((ones((n, 1)),
                      arange(1, n + 1, dtype='float').reshape(-1, 1)))]

        for smoother in smoothers:
            for B in Bs:
                ml = rootnode_solver(A, B, max_coarse=10,
                                     presmoother=smoother,
                                     postsmoother=smoother)
                P = ml.aspreconditioner()
                x = rand(n,)
                y = rand(n,)
                assert_approx_equal(dot(P * x, y), dot(x, P * y))
Example #17
    def test_basic(self):
        """check that method converges at a reasonable rate"""

        for A, B, c_factor, symmetry, smooth in self.cases:
            A = csr_matrix(A)

            ml = rootnode_solver(A, B, symmetry=symmetry, smooth=smooth,
                                 max_coarse=10)

            numpy.random.seed(0)  # make tests repeatable

            x = rand(A.shape[0]) + 1.0j * rand(A.shape[0])
            b = A * rand(A.shape[0])
            residuals = []

            x_sol = ml.solve(b, x0=x, maxiter=20, tol=1e-10,
                             residuals=residuals)

            avg_convergence_ratio =\
                (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))

            # print "Complex Test:   %1.3e,  %1.3e,  %d,  %1.3e" % \
            #    (avg_convergence_ratio, c_factor, len(ml.levels),
            #     ml.operator_complexity())
            assert(avg_convergence_ratio < c_factor)
Example #18
File: optimize.py Project: mmssouza/idsc
 def run(self):
 
  for i in scipy.arange(self.ns):
   # Update velocity
   
   self.v[i] = self.w*self.v[i] 
   self.v[i] = self.v[i] + self.c1*scipy.rand()*( self.bfp[i] - self.pop[i]) 
   self.v[i] = self.v[i] + self.c2*scipy.rand()*(self.bfg - self.pop[i])
   for j in range(Dim):
    if self.v[i][j] >= 52.6:
     self.v[i][j] = 52.6
    elif self.v[i][j] <= -52.6:
     self.v[i][j] = -52.6
   # Update position
   self.pop[i] = self.pop[i] + self.v[i]
   
   self.fit[i],self.pop[i] = self.avalia_aptidao(self.pop[i])
   
   # Update the particle's best position
   if self.fit[i] < self.bfp_fitness[i]:
    self.bfp[i] = self.pop[i]
    self.bfp_fitness[i] = self.fit[i]
   if self.debug:
    print "self.fit[{0}] = {1} bfp_fitness = {2}".format(i,self.fit[i],self.bfp_fitness[i])
   # Update the global best position
   if self.bfp_fitness[i] < self.bfg_fitness:
    self.bfg_fitness = self.bfp_fitness[i].copy()
    self.bfg = self.bfp[i].copy()
Example #19
    def test_DAD(self):
        A = poisson((50, 50), format='csr')

        x = rand(A.shape[0])
        b = rand(A.shape[0])

        D = diag_sparse(1.0 / sqrt(10 ** (12 * rand(A.shape[0]) - 6))).tocsr()
        D_inv = diag_sparse(1.0 / D.data)

        DAD = D * A * D

        B = ones((A.shape[0], 1))

        # TODO force 2 level method and check that result is the same
        kwargs = {'max_coarse': 1, 'max_levels': 2, 'coarse_solver': 'splu'}

        sa = rootnode_solver(DAD, D_inv * B, **kwargs)

        residuals = []
        x_sol = sa.solve(b, x0=x, maxiter=10, tol=1e-12, residuals=residuals)

        avg_convergence_ratio =\
            (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))

        # print "Diagonal Scaling Test:   %1.3e,  %1.3e" %
        # (avg_convergence_ratio, 0.4)
        assert(avg_convergence_ratio < 0.4)
Example #20
def test_Transformation1():
    "Transform 2-level to eigenbasis and back"
    H1 = scipy.rand() * sigmax() + scipy.rand() * sigmay() + scipy.rand() * sigmaz()
    evals, ekets = H1.eigenstates()
    Heb = H1.transform(ekets)       # eigenbasis (should be diagonal)
    H2 = Heb.transform(ekets, True) # back to original basis
    assert_equal((H1 - H2).norm() < 1e-6,True)
Example #21
File: optimize.py Project: mmssouza/idsc
 def Evolve_PSO(self):
  for i in scipy.arange(self.npop2):
   # Update velocity
   self.v[i] = self.w*self.v[i] 
   self.v[i] = self.v[i] + self.c1*scipy.rand()*( self.bfp[i] - self.pop2[i]) 
   self.v[i] = self.v[i] + self.c2*scipy.rand()*(self.bfg - self.pop2[i])
   for j in range(self.v.shape[1]):
    if self.v[i][j] >= self.arg_lim[j][1]/2:
     self.v[i][j] = self.arg_lim[j][1]/2
    elif self.v[i][j] <= -self.arg_lim[j][1]/2:
     self.v[i][j] = -self.arg_lim[j][1]/2
  
   # Update position
   self.pop2[i] = self.pop2[i] + self.v[i]   
   self.ans2[i],self.pop2[i] = self.resolve_desafio(self.pop2[i])
   self.fit2[i] = self.avalia_aptidao2(self.ans2[i])
   self.bfp_fitness[i] = self.avalia_aptidao2(self.bfp_ans[i])
   self.bfg_fitness = self.avalia_aptidao2(self.bfg_ans)
   # Update the particle's best position
   if (self.fit2[i] > self.bfp_fitness[i]):
    self.bfp[i] = self.pop2[i]
    self.bfp_fitness[i] = self.fit2[i]
    self.bfp_ans[i] = self.ans2[i]
   # Update the global best position
   if (self.bfp_fitness[i] > self.bfg_fitness):
    self.bfg_fitness = self.bfp_fitness[i].copy()
    self.bfg = self.bfp[i].copy()
    self.bfg_ans = self.bfp_ans[i].copy()
Example #22
    def testRWSeparatorDetection(self):
        "Read and write with automatic separator detection"

        # create some random data
        N = 10
        data = (1-2*scipy.rand(N,N)) + 1j*(1-2*scipy.rand(N,N))

        # comma separated values
        file_data_store("test.dat", data, "complex", "exp", ",")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data-data2))) < 1e-8) 

        # semicolon separated values
        file_data_store("test.dat", data, "complex", "exp", ";")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data-data2))) < 1e-8) 

        # tab separated values
        file_data_store("test.dat", data, "complex", "exp", "\t")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data-data2))) < 1e-8) 

        # space separated values
        file_data_store("test.dat", data, "complex", "exp", " ")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data-data2))) < 1e-8) 

        # mixed-whitespace separated values
        file_data_store("test.dat", data, "complex", "exp", " \t ")
        data2 = file_data_read("test.dat")
        assert_(amax(abs((data-data2))) < 1e-8) 
        os.remove("test.dat")
Example #23
    def initialize(self, state, chain):
        params = {}
        for key in self.scan_range.keys():
            # Check for single range
            if len(self.scan_range[key]) == 2:
                params[key] = sp.rand() * (self.scan_range[key][1] - self.scan_range[key][0]) + self.scan_range[key][0]
            else:
                # calculate weights of sub_regions
                sub_size = sp.array([])
                # Determine weights of region
                for i in range(0, len(self.scan_range[key]), 2):
                    sub_size = sp.append(sub_size, self.scan_range[key][i + 1] - self.scan_range[key][i])
                    self.range_weight[key] = sub_size / float(sp.sum(sub_size))

                # sample region based on size
                i_sel = 2 * sp.searchsorted(sp.cumsum(self.range_weight[key]), sp.rand())
                # sample point
                params[key] = (
                    sp.rand() * (self.scan_range[key][i_sel + 1] - self.scan_range[key][i_sel])
                    + self.scan_range[key][i_sel]
                )

        # params=dict([(key,sp.rand()*(self.scan_range[key][1]-self.scan_range[key][0])+self.scan_range[key][0]) for key in self.scan_range.keys() if type(self.scan_range[key])==list])

        # Add constant parameters
        for key in self.constants.keys():
            params[key] = self.constants[key]

        for key in self.functions.keys():
            params[key] = self.functions[key](params)

        modelid = "%i%01i" % (self.rank, 0) + "%i" % chain.accepted

        return params, modelid
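A condensed, self-contained sketch of the piecewise-interval sampling performed above (the two-interval scan_range and the variable names are hypothetical):

import scipy as sp

scan_range = [0.0, 1.0, 5.0, 7.0]  # two disjoint intervals, [0, 1] and [5, 7]
widths = sp.array([scan_range[i + 1] - scan_range[i]
                   for i in range(0, len(scan_range), 2)])
weights = widths / widths.sum()
# pick an interval with probability proportional to its width...
i_sel = 2 * sp.searchsorted(sp.cumsum(weights), sp.rand())
# ...then sample uniformly inside the chosen interval
sample = sp.rand() * (scan_range[i_sel + 1] - scan_range[i_sel]) + scan_range[i_sel]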
Example #24
	def __init__(self, parent=None, width = 10, height = 12, dpi = 100, sharex = None, sharey = None):
		self.fig = Figure(figsize = (width, height), dpi=dpi, facecolor = '#FFFFFF')

		self.ax = Axes3D(self.fig)
#		n = 100
#		for c, zl, zh in [('r', -50, -25), ('b', -30, -5)]:
#		    xs = randrange(n, 23, 32)
#		    ys = randrange(n, 0, 100)
#		    zs = randrange(n, zl, zh)
		self.ax.scatter3D(S.rand(200), S.rand(200), S.rand(200))#, c = c,  alpha = 0.8)

		self.ax.set_xlabel('X Label')
		self.ax.set_ylabel('Y Label')
		self.ax.set_zlabel('Z Label')

#		self.ax = self.fig.add_subplot(111, sharex = sharex, sharey = sharey)
#		self.fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9)
		self.xtitle="x-Axis"
		self.ytitle="y-Axis"
		self.PlotTitle = "Plot"
		self.grid_status = True
		self.xaxis_style = 'linear'
		self.yaxis_style = 'linear'
		self.format_labels()
		self.ax.hold(True)
		FigureCanvas.__init__(self, self.fig)
		#self.fc = FigureCanvas(self.fig)
		FigureCanvas.setSizePolicy(self,
			QtGui.QSizePolicy.Expanding,
			QtGui.QSizePolicy.Expanding)
		FigureCanvas.updateGeometry(self)
Example #25
    def test_improve_candidates(self):
        # test improve_candidates for the Poisson problem and elasticity, where
        # rho_scale is the amount that each successive improve_candidates
        # option should improve convergence over the previous
        # improve_candidates option.
        improve_candidates_list = [None, [("block_gauss_seidel", {"iterations": 4, "sweep": "symmetric"})]]
        # make tests repeatable
        numpy.random.seed(0)

        cases = []
        A_elas, B_elas = linear_elasticity((60, 60), format="bsr")
        #                Matrix                       Candidates    rho_scale
        cases.append((poisson((75, 75), format="csr"), ones((75 * 75, 1)), 0.9))
        cases.append((A_elas, B_elas, 0.9))
        for (A, B, rho_scale) in cases:
            last_rho = -1.0
            x0 = rand(A.shape[0], 1)
            b = rand(A.shape[0], 1)
            for improve_candidates in improve_candidates_list:
                ml = rootnode_solver(A, B, max_coarse=10, improve_candidates=improve_candidates)
                residuals = []
                x_sol = ml.solve(b, x0=x0, maxiter=20, tol=1e-10, residuals=residuals)
                del x_sol
                rho = (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
                if last_rho == -1.0:
                    last_rho = rho
                else:
                    # each successive improve_candidates option should be an
                    # improvement on the previous
                    # print "\nimprove_candidates Test: %1.3e, %1.3e, %d\n" \
                    #     % (rho, rho_scale*last_rho, A.shape[0])
                    assert rho < rho_scale * last_rho
                    last_rho = rho
Example #26
 def test_dimensions(self):
     # 1D
     img = scipy.rand(10)
     template = scipy.random.randint(0, 2, (3))
     result = ght(img, template)
     self.assertEqual(result.ndim, 1, "Computing ght with one-dimensional input data failed.")
     # 2D
     img = scipy.rand(10, 11)
     template = scipy.random.randint(0, 2, (3, 4))
     result = ght(img, template)
     self.assertEqual(result.ndim, 2, "Computing ght with two-dimensional input data failed.")
     # 3D
     img = scipy.rand(10, 11, 12)
     template = scipy.random.randint(0, 2, (3, 4, 5))
     result = ght(img, template)
     self.assertEqual(result.ndim, 3, "Computing ght with three-dimensional input data failed.")
     # 4D
     img = scipy.rand(10, 11, 12, 13)
     template = scipy.random.randint(0, 2, (3, 4, 5, 6))
     result = ght(img, template)
     self.assertEqual(result.ndim, 4, "Computing ght with four-dimensional input data failed.")
     # 5D
     img = scipy.rand(3, 4, 3, 4, 3)
     template = scipy.random.randint(0, 2, (2, 2, 2, 2, 2))
     result = ght(img, template)
     self.assertEqual(result.ndim, 5, "Computing ght with five-dimensional input data failed.")
Example #27
File: split.py Project: pyamg/pyamg
def preprocess(S, coloring_method=None):
    """Preprocess splitting functions.

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix
    coloring_method : string
        Algorithm used to compute the vertex coloring:
            * 'MIS' - Maximal Independent Set
            * 'JP'  - Jones-Plassmann (parallel)
            * 'LDF' - Largest-Degree-First (parallel)

    Returns
    -------
    weights: ndarray
        Weights from a graph coloring of G
    S : csr_matrix
        Strength matrix with ones
    T : csr_matrix
        transpose of S
    G : csr_matrix
        union of S and T

    Notes
    -----
    Performs the following operations:
        - Checks input strength of connection matrix S
        - Replaces S.data with ones
        - Creates T = S.T in CSR format
        - Creates G = S union T in CSR format
        - Creates random weights
        - Augments weights with graph coloring (if coloring_method is not None)

    """
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')

    if S.shape[0] != S.shape[1]:
        raise ValueError('expected square matrix, shape=%s' % (S.shape,))

    N = S.shape[0]
    S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
                   shape=(N, N))
    T = S.T.tocsr()  # transpose S for efficient column access

    G = S + T  # form graph (must be symmetric)
    G.data[:] = 1

    weights = np.ravel(T.sum(axis=1))  # initial weights
    # weights -= T.diagonal()          # discount self loops

    if coloring_method is None:
        weights = weights + sp.rand(len(weights))
    else:
        coloring = vertex_coloring(G, coloring_method)
        num_colors = coloring.max() + 1
        weights = weights + (sp.rand(len(weights)) + coloring)/num_colors

    return (weights, G, S, T)
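A hedged usage sketch for preprocess, using a gallery Poisson matrix as a stand-in for a real strength-of-connection matrix (assumes pyamg's gallery is importable; with coloring_method=None the weights are just column sums plus a random tie-breaker):

from pyamg.gallery import poisson

S = poisson((10, 10), format='csr')
weights, G, S1, T1 = preprocess(S)
assert weights.shape == (S.shape[0],)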
Example #28
 def runSequences(self, num_actions=1, num_features=1, num_states=1,
                  num_interactions=10000, gamma=None, _lambda=None, lr=None, r_states=None):
     if r_states is None:
         r_states = [rand(num_features) for _ in range(num_states)]
     else:
         num_features = len(r_states[0])
         num_states = len(r_states)
     state_seq = [choice(r_states) for _ in range(num_interactions)]
     action_seq = [randint(0, num_actions - 1) for _ in range(num_interactions)]
     rewards = [ones(num_interactions), rand(num_interactions), action_seq, [s[0] for s in state_seq]]
     datas = [list(zip(state_seq, action_seq, r)) for r in rewards]        
     res = []        
     for algo in self.algos:
         res.append((algo.__name__, []))
         for d in datas:
             l = algo(num_actions, num_features)
             if gamma is not None:       
                 l.rewardDiscount = gamma
             if _lambda is not None:
                 l._lambda = _lambda
             if lr is not None:
                 l.learningRate = lr                     
             self.trainWith(l, d)
             res[-1][-1].append([dot(l._theta, s) for s in r_states])
     return res
Example #29
File: simulator.py Project: PMBio/limix
    def getRegion(self,size=3e4,min_nSNPs=1,chrom_i=None,pos_min=None,pos_max=None):
        """
        Sample a region from the piece of genotype X, chrom, pos
        minSNPnum:  minimum number of SNPs contained in the region
        Ichrom:  restrict X to chromosome Ichrom before taking the region
        cis:        bool vector that marks the sorted region
        region:  vector that contains chrom and init and final position of the region
        """
        if (self.chrom is None) or (self.pos is None):
            bim = plink_reader.readBIM(self.bfile,usecols=(0,1,2,3))
            chrom = SP.array(bim[:,0],dtype=int)
            pos   = SP.array(bim[:,3],dtype=int)
        else:
            chrom = self.chrom
            pos   = self.pos

        if chrom_i is None:
            n_chroms = chrom.max()
            chrom_i  = int(SP.ceil(SP.rand()*n_chroms))

        pos   = pos[chrom==chrom_i]
        chrom = chrom[chrom==chrom_i]

        ipos = SP.ones(len(pos),dtype=bool)
        if pos_min is not None:
            ipos = SP.logical_and(ipos,pos_min<pos)

        if pos_max is not None:
            ipos = SP.logical_and(ipos,pos<pos_max)

        pos = pos[ipos]
        chrom = chrom[ipos]

        if size==1:
            # select single SNP
            idx = int(SP.floor(pos.shape[0]*SP.rand()))
            cis  = SP.arange(pos.shape[0])==idx
            region = SP.array([chrom_i,pos[idx],pos[idx]])
        else:
            while 1:
                idx = int(SP.floor(pos.shape[0]*SP.rand()))
                posT1 = pos[idx]
                posT2 = pos[idx]+size
                if posT2<=pos.max():
                    cis = chrom==chrom_i
                    cis*= (pos>posT1)*(pos<posT2)
                    if cis.sum()>min_nSNPs: break
            region = SP.array([chrom_i,posT1,posT2])

        start = SP.nonzero(cis)[0].min()
        nSNPs  = cis.sum()

        if self.X is None:
            rv = plink_reader.readBED(self.bfile,useMAFencoding=True,start = start, nSNPs = nSNPs,bim=bim)
            Xr = rv['snps']
        else:
            Xr = self.X[:,start:start+nSNPs]

        return Xr, region
Example #30
File: BaseTest.py Project: PMEAL/OpenPNM
 def test_interleave_data_with_unyts_on_only_one(self):
     import unyt
     pn = op.network.Cubic(shape=[10, 1, 1])
     geo1 = op.geometry.GenericGeometry(network=pn, pores=[0, 1, 2, 3, 4])
     geo2 = op.geometry.GenericGeometry(network=pn, pores=[5, 6, 7, 8, 9])
     geo1['pore.test'] = sp.rand(geo1.Np, )
     geo2['pore.test'] = sp.rand(geo2.Np, ) * unyt.m
     assert hasattr(pn['pore.test'], 'units')
Example #31
    def arg_max_reduced_likelihood_function(self):
        """
        This function estimates the autocorrelation parameters theta as the
        maximizer of the reduced likelihood function.
        (Minimization of the opposite reduced likelihood function is used for
        convenience)

        Parameters
        ----------
        self : All parameters are stored in the Gaussian Process model object.

        Returns
        -------
        optimal_theta : array_like
            The best set of autocorrelation parameters (the sought maximizer of
            the reduced likelihood function).

        optimal_reduced_likelihood_function_value : double
            The optimal reduced likelihood function value.

        optimal_par : dict
            The BLUP parameters associated to thetaOpt.
        """

        # Initialize output
        best_optimal_theta = []
        best_optimal_rlf_value = []
        best_optimal_par = []

        if self.verbose:
            print "The chosen optimizer is: " + str(self.optimizer)
            if self.random_start > 1:
                print str(self.random_start) + " random starts are required."

        percent_completed = 0.

        # Force optimizer to fmin_cobyla if the model is meant to be isotropic
        if self.optimizer == 'Welch' and self.theta0.size == 1:
            self.optimizer = 'fmin_cobyla'

        if self.optimizer == 'fmin_cobyla':

            def minus_reduced_likelihood_function(log10t):
                return -self.reduced_likelihood_function(theta=10.**log10t)[0]

            constraints = []
            for i in range(self.theta0.size):
                # bind i now; a late-binding closure would use only the last i
                constraints.append(lambda log10t, i=i:
                                   log10t[i] - np.log10(self.thetaL[0, i]))
                constraints.append(lambda log10t, i=i:
                                   np.log10(self.thetaU[0, i]) - log10t[i])

            for k in range(self.random_start):

                if k == 0:
                    # Use specified starting point as first guess
                    theta0 = self.theta0
                else:
                    # Generate a random starting point log10-uniformly
                    # distributed between bounds
                    log10theta0 = np.log10(self.thetaL) \
                        + rand(self.theta0.size).reshape(self.theta0.shape) \
                        * np.log10(self.thetaU / self.thetaL)
                    theta0 = 10.**log10theta0

                # Run Cobyla
                log10_optimal_theta = \
                    optimize.fmin_cobyla(minus_reduced_likelihood_function,
                                    np.log10(theta0), constraints, iprint=0)

                optimal_theta = 10.**log10_optimal_theta
                optimal_minus_rlf_value, optimal_par = \
                    self.reduced_likelihood_function(theta=optimal_theta)
                optimal_rlf_value = -optimal_minus_rlf_value

                # Compare the new optimizer to the best previous one
                if k > 0:
                    if optimal_rlf_value > best_optimal_rlf_value:
                        best_optimal_rlf_value = optimal_rlf_value
                        best_optimal_par = optimal_par
                        best_optimal_theta = optimal_theta
                else:
                    best_optimal_rlf_value = optimal_rlf_value
                    best_optimal_par = optimal_par
                    best_optimal_theta = optimal_theta
                if self.verbose and self.random_start > 1:
                    if (20 * k) / self.random_start > percent_completed:
                        percent_completed = (20 * k) / self.random_start
                        print "%s completed" % (5 * percent_completed)

            optimal_rlf_value = best_optimal_rlf_value
            optimal_par = best_optimal_par
            optimal_theta = best_optimal_theta

        elif self.optimizer == 'Welch':

            # Backup of the given attributes
            theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
            corr = self.corr
            verbose = self.verbose

            # This will iterate over fmin_cobyla optimizer
            self.optimizer = 'fmin_cobyla'
            self.verbose = False

            # Initialize under isotropy assumption
            if verbose:
                print("Initialize under isotropy assumption...")
            self.theta0 = array2d(self.theta0.min())
            self.thetaL = array2d(self.thetaL.min())
            self.thetaU = array2d(self.thetaU.max())
            theta_iso, optimal_rlf_value_iso, par_iso = \
                self.arg_max_reduced_likelihood_function()
            optimal_theta = theta_iso + np.zeros(theta0.shape)

            # Iterate over all dimensions of theta allowing for anisotropy
            if verbose:
                print("Now improving allowing for anisotropy...")
            for i in np.random.permutation(range(theta0.size)):
                if verbose:
                    print "Proceeding along dimension %d..." % (i + 1)
                self.theta0 = array2d(theta_iso)
                self.thetaL = array2d(thetaL[0, i])
                self.thetaU = array2d(thetaU[0, i])

                def corr_cut(t, d):
                    return corr(
                        array2d(
                            np.hstack([
                                optimal_theta[0][0:i], t[0],
                                optimal_theta[0][(i + 1)::]
                            ])), d)

                self.corr = corr_cut
                optimal_theta[0, i], optimal_rlf_value, optimal_par = \
                    self.arg_max_reduced_likelihood_function()

            # Restore the given attributes
            self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
            self.corr = corr
            self.optimizer = 'Welch'
            self.verbose = verbose

        else:

            raise NotImplementedError(
                ("This optimizer ('%s') is not " +
                 "implemented yet. Please contribute!") % self.optimizer)

        return optimal_theta, optimal_rlf_value, optimal_par
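The random restarts above draw each starting theta log10-uniformly between thetaL and thetaU. A standalone sketch of just that sampling step (the bounds are made up):

import numpy as np
from numpy.random import rand

thetaL = np.array([[1e-3, 1e-2]])
thetaU = np.array([[1e+1, 1e+2]])
log10theta0 = np.log10(thetaL) \
    + rand(thetaL.size).reshape(thetaL.shape) * np.log10(thetaU / thetaL)
theta0 = 10. ** log10theta0
# every component stays inside its [thetaL, thetaU] bounds
assert np.all((theta0 >= thetaL) & (theta0 <= thetaU))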
Example #32
def approximate_spectral_radius(A,
                                tol=0.01,
                                maxiter=15,
                                restart=5,
                                symmetric=None,
                                initial_guess=None,
                                return_vector=False):
    """
    Approximate the spectral radius of a matrix

    Parameters
    ----------

    A : {dense or sparse matrix}
        E.g. csr_matrix, csc_matrix, ndarray, etc.
    tol : {scalar}
        Relative tolerance of approximation, i.e., the error divided
        by the approximate spectral radius is compared to tol.
    maxiter : {integer}
        Maximum number of iterations to perform
    restart : {integer}
        Number of restarted Arnoldi processes.  For example, a value of 0 will
        run Arnoldi once, for maxiter iterations, and a value of 1 will restart
        Arnoldi once, using the maximal eigenvector from the first Arnoldi
        process as the initial guess.
    symmetric : {boolean}
        True  - if A is symmetric
                Lanczos iteration is used (more efficient)
        False - if A is non-symmetric (default)
                Arnoldi iteration is used (less efficient)
    initial_guess : {array|None}
        If n x 1 array, then use as initial guess for Arnoldi/Lanczos.
        If None, then use a random initial guess.
    return_vector : {boolean}
        True - return an approximate dominant eigenvector, in addition to the
               spectral radius.
        False - Do not return the approximate dominant eigenvector

    Returns
    -------
    An approximation to the spectral radius of A, and
    if return_vector=True, then also return the approximate dominant
    eigenvector

    Notes
    -----
    The spectral radius is approximated by looking at the Ritz eigenvalues.
    Arnoldi iteration (or Lanczos) is used to project the matrix A onto a
    Krylov subspace: H = Q* A Q.  The eigenvalues of H (i.e. the Ritz
    eigenvalues) should represent the eigenvalues of A in the sense that the
    minimum and maximum values are usually well matched (for the symmetric case
    it is true since the eigenvalues are real).

    References
    ----------
    .. [1] Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der Vorst,
       editors.  "Templates for the Solution of Algebraic Eigenvalue Problems:
       A Practical Guide", SIAM, Philadelphia, 2000.

    Examples
    --------
    >>> from pyamg.util.linalg import approximate_spectral_radius
    >>> import numpy as np
    >>> from scipy.linalg import eigvals, norm
    >>> A = np.array([[1.,0.],[0.,1.]])
    >>> print approximate_spectral_radius(A,maxiter=3)
    1.0
    >>> print max([norm(x) for x in eigvals(A)])
    1.0
    """

    if not hasattr(A, 'rho') or return_vector:
        # somehow more restarts cause a nonsymmetric case to fail... look at
        # this.  What about A.dtype=int?  Convert somehow?

        # The use of the restart vector v0 requires that the full Krylov
        # subspace V be stored.  So, set symmetric to False.
        symmetric = False

        if maxiter < 1:
            raise ValueError('expected maxiter > 0')
        if restart < 0:
            raise ValueError('expected restart >= 0')
        if A.dtype == int:
            raise ValueError('expected A to be float (complex or real)')
        if A.shape[0] != A.shape[1]:
            raise ValueError('expected square A')

        if initial_guess is None:
            v0 = sp.rand(A.shape[1], 1)
            if A.dtype == complex:
                v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
        else:
            if initial_guess.shape[0] != A.shape[0]:
                raise ValueError('initial_guess and A must have same shape')
            if (len(initial_guess.shape) > 1) and (initial_guess.shape[1] > 1):
                raise ValueError('initial_guess must be an (n,1) or\
                                  (n,) vector')
            v0 = initial_guess.reshape(-1, 1)
            v0 = np.array(v0, dtype=A.dtype)

        for j in range(restart + 1):
            [evect, ev, H, V, breakdown_flag] =\
                _approximate_eigenvalues(A, tol, maxiter,
                                         symmetric, initial_guess=v0)
            # Calculate error in dominant eigenvector
            nvecs = ev.shape[0]
            max_index = np.abs(ev).argmax()
            error = H[nvecs, nvecs - 1] * evect[-1, max_index]

            # error is a fast way of calculating the following line
            # error2 = ( A - ev[max_index]*sp.mat(
            #           sp.eye(A.shape[0],A.shape[1])) )*\
            #           ( sp.mat(sp.hstack(V[:-1]))*\
            #           evect[:,max_index].reshape(-1,1) )
            # print str(error) + "    " + str(sp.linalg.norm(e2))

            if (np.abs(error)/np.abs(ev[max_index]) < tol) or\
               breakdown_flag:
                # halt if below relative tolerance
                v0 = np.dot(np.hstack(V[:-1]), evect[:,
                                                     max_index].reshape(-1, 1))
                break
            else:
                v0 = np.dot(np.hstack(V[:-1]), evect[:,
                                                     max_index].reshape(-1, 1))
        # end j-loop

        rho = np.abs(ev[max_index])
        if sparse.isspmatrix(A):
            A.rho = rho

        if return_vector:
            return (rho, v0)
        else:
            return rho

    else:
        return A.rho
Example #33
def ishermitian(A, fast_check=True, tol=1e-6, verbose=False):
    """Returns True if A is Hermitian to within tol

    Parameters
    ----------
    A   : {dense or sparse matrix}
        e.g. array, matrix, csr_matrix, ...
    fast_check : {bool}
        If True, use the heuristic < Ax, y> = < x, Ay>
        for random vectors x and y to check for conjugate symmetry.
        If False, compute A - A.H.
    tol : {float}
        Symmetry tolerance

    verbose: {bool}
        prints
        max( \|A - A.H\| )       if nonhermitian and fast_check=False
        abs( <Ax, y> - <x, Ay> ) if nonhermitian and fast_check=True

    Returns
    -------
    True                        if hermitian
    False                       if nonhermitian

    Notes
    -----
    This function applies a simple test of conjugate symmetry

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.util.linalg import ishermitian
    >>> ishermitian(np.array([[1,2],[1,1]]))
    False

    >>> from pyamg.gallery import poisson
    >>> ishermitian(poisson((10,10)))
    True
    """
    # convert to matrix type
    if not sparse.isspmatrix(A):
        A = np.asmatrix(A)

    if fast_check:
        x = sp.rand(A.shape[0], 1)
        y = sp.rand(A.shape[0], 1)
        if A.dtype == complex:
            x = x + 1.0j * sp.rand(A.shape[0], 1)
            y = y + 1.0j * sp.rand(A.shape[0], 1)
        xAy = np.dot((A * x).conjugate().T, y)
        xAty = np.dot(x.conjugate().T, A * y)
        diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy * xAty)))

    else:
        # compute the difference, A - A.H
        if sparse.isspmatrix(A):
            diff = np.ravel((A - A.H).data)
        else:
            diff = np.ravel(A - A.H)

        if np.max(diff.shape) == 0:
            diff = 0
        else:
            diff = np.max(np.abs(diff))

    if diff < tol:
        return True
    else:
        if verbose:
            print(diff)
        return False
Example #34
import scipy as sp
import nose.tools as nt
import numpy.testing as npt

# my imports

from special import wignerd, gensph

# Global variables
# ================================================================================

lmax = 20

mns = [(0, 0), (2, 2), (2, -2), (0, 2)]
mns += [(0, 1), (3, 0), (3, 5), (7, 1)]
theta = sp.pi * sp.rand(2, 3)
ae = lambda x, y: npt.assert_array_almost_equal(x, y, decimal=14)

d00 = sp.array([
    sp.ones(theta.shape),
    sp.cos(theta), .5 * (3 * sp.cos(theta)**2 - 1.0),
    -.5 * sp.cos(theta) * (3.0 - 5.0 * sp.cos(theta)**2),
    1.0 / 8.0 * (3.0 - 30 * sp.cos(theta)**2 + 35 * sp.cos(theta)**4)
])
d22 = sp.array([
    sp.zeros(theta.shape),
    sp.zeros(theta.shape), .25 * (1.0 + sp.cos(theta))**2,
    -.25 * (1.0 + sp.cos(theta))**2 * (2 - 3 * sp.cos(theta)), .25 *
    (1.0 + sp.cos(theta))**2 * (1 - 7 * sp.cos(theta) + 7 * sp.cos(theta)**2)
])
d2m2 = sp.array([
Example #35
def _approximate_eigenvalues(A,
                             tol,
                             maxiter,
                             symmetric=None,
                             initial_guess=None):
    """Used by approximate_spectral_radius and condest

       Returns [W, E, H, V, breakdown_flag], where W and E are the eigenvectors
       and eigenvalues of the Hessenberg matrix H, respectively, and V is the
       Krylov space.  breakdown_flag denotes whether Lanczos/Arnoldi suffered
       breakdown.  E is therefore the approximate eigenvalues of A.

       To obtain approximate eigenvectors of A, compute V*W.
       """

    from scipy.sparse.linalg import aslinearoperator

    A = aslinearoperator(A)  # A could be dense or sparse, or something weird

    # Choose tolerance for deciding if break-down has occurred
    t = A.dtype.char
    eps = np.finfo(np.float).eps
    feps = np.finfo(np.single).eps
    geps = np.finfo(np.longfloat).eps
    _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
    breakdown = {
        0: feps * 1e3,
        1: eps * 1e6,
        2: geps * 1e6
    }[_array_precision[t]]
    breakdown_flag = False

    if A.shape[0] != A.shape[1]:
        raise ValueError('expected square matrix')

    maxiter = min(A.shape[0], maxiter)

    if initial_guess is None:
        v0 = sp.rand(A.shape[1], 1)
        if A.dtype == complex:
            v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
    else:
        v0 = initial_guess

    v0 /= norm(v0)

    # Important to type H based on v0, so that a real nonsymmetric matrix, can
    # have an imaginary initial guess for its Arnoldi Krylov space
    H = np.zeros((maxiter + 1, maxiter),
                 dtype=np.find_common_type([v0.dtype, A.dtype], []))

    V = [v0]

    beta = 0.0
    for j in range(maxiter):
        w = A * V[-1]

        if symmetric:
            if j >= 1:
                H[j - 1, j] = beta
                w -= beta * V[-2]

            alpha = np.dot(np.conjugate(w.ravel()), V[-1].ravel())
            H[j, j] = alpha
            w -= alpha * V[-1]  # axpy(V[-1],w,-alpha)

            beta = norm(w)
            H[j + 1, j] = beta

            if (H[j + 1, j] < breakdown):
                breakdown_flag = True
                break

            w /= beta

            V.append(w)
            V = V[-2:]  # retain only last two vectors

        else:
            # orthogonalize against Vs
            for i, v in enumerate(V):
                H[i, j] = np.dot(np.conjugate(v.ravel()), w.ravel())
                w = w - H[i, j] * v

            H[j + 1, j] = norm(w)

            if (H[j + 1, j] < breakdown):
                breakdown_flag = True
                if H[j + 1, j] != 0.0:
                    w = w / H[j + 1, j]
                V.append(w)
                break

            w = w / H[j + 1, j]
            V.append(w)

            # if upper 2x2 block of Hessenberg matrix H is almost symmetric,
            # and the user has not explicitly specified symmetric=False,
            # then switch to symmetric Lanczos algorithm
            # if symmetric is not False and j == 1:
            #    if abs(H[1,0] - H[0,1]) < 1e-12:
            #        #print "using symmetric mode"
            #        symmetric = True
            #        V = V[1:]
            #        H[1,0] = H[0,1]
            #        beta = H[2,1]

    # print "Approximated spectral radius in %d iterations" % (j + 1)

    from scipy.linalg import eig

    Eigs, Vects = eig(H[:j + 1, :j + 1], left=False, right=True)

    return (Vects, Eigs, H, V, breakdown_flag)
Example #36
    0.99 * x_aper,
    'y_sem_ellip_insc':
    0.99 * y_aper
})

picFDSW = PIC_FDSW.FiniteDifferences_ShortleyWeller_SquareGrid(
    chamb=chamber, Dh=Dh, sparse_solver='PyKLU')
picFD = PIC_FD.FiniteDifferences_Staircase_SquareGrid(chamb=chamber,
                                                      Dh=Dh,
                                                      sparse_solver='PyKLU')
picFFTPEC = PIC_PEC_FFT.FFT_PEC_Boundary_SquareGrid(x_aper=chamber.x_aper,
                                                    y_aper=chamber.y_aper,
                                                    Dh=Dh)

# generate particles
x_part = R_charge * (2. * rand(N_part_gen) - 1.)
y_part = R_charge * (2. * rand(N_part_gen) - 1.)
mask_keep = x_part**2 + y_part**2 < R_charge**2
x_part = x_part[mask_keep]
y_part = y_part[mask_keep]

nel_part = 0 * x_part + 1.

#pic scatter
picFDSW.scatter(x_part, y_part, nel_part)
picFD.scatter(x_part, y_part, nel_part)
picFFTPEC.scatter(x_part, y_part, nel_part)

#pic solve
picFDSW.solve()
picFD.solve()
Example #37
    def plot(self, img_order=None, freq=None, figsize=None, no_axis=False, mic_marker_size=10, **kwargs):
        ''' Plots the room with its walls, microphones, sources and images '''
    
        import matplotlib
        from matplotlib.patches import Circle, Wedge, Polygon
        from matplotlib.collections import PatchCollection
        import matplotlib.pyplot as plt

        if (self.dim == 2):
            fig = plt.figure(figsize=figsize)

            if no_axis is True:
                ax = fig.add_axes([0, 0, 1, 1], aspect='equal', **kwargs)
                ax.axis('off')
                rect = fig.patch
                rect.set_facecolor('gray')
                rect.set_alpha(0.15)
            else:
                ax = fig.add_subplot(111, aspect='equal', **kwargs)

            # draw room
            polygons = [Polygon(self.corners.T, True)]
            p = PatchCollection(polygons, cmap=matplotlib.cm.jet,
                    facecolor=np.array([1, 1, 1]), edgecolor=np.array([0, 0, 0]))
            ax.add_collection(p)

            # draw the microphones
            if (self.mic_array is not None):
                for mic in self.mic_array.R.T:
                    ax.scatter(mic[0], mic[1],
                            marker='x', linewidth=0.5, s=mic_marker_size, c='k')

                # draw the beam pattern of the beamformer if requested (and available)
                if freq is not None \
                        and isinstance(self.mic_array, bf.Beamformer) \
                        and (self.mic_array.weights is not None or self.mic_array.filters is not None):

                    freq = np.array(freq)
                    if freq.ndim == 0:
                        freq = np.array([freq])

                    # define a new set of colors for the beam patterns
                    newmap = plt.get_cmap('autumn')
                    desat = 0.7
                    try:
                        # this is for matplotlib >= 2.0.0
                        ax.set_prop_cycle(color=[newmap(k) for k in desat*np.linspace(0, 1, len(freq))])
                    except:
                        # keep this for backward compatibility
                        ax.set_color_cycle([newmap(k) for k in desat*np.linspace(0, 1, len(freq))])

                    phis = np.arange(360) * 2 * np.pi / 360.
                    newfreq = np.zeros(freq.shape)
                    H = np.zeros((len(freq), len(phis)), dtype=complex)
                    for i, f in enumerate(freq):
                        newfreq[i], H[i] = self.mic_array.response(phis, f)

                    # normalize max amplitude to one
                    H = np.abs(H)**2/np.abs(H).max()**2

                    # a normalization factor according to room size
                    norm = np.linalg.norm((self.corners - self.mic_array.center), axis=0).max()

                    # plot all the beam patterns
                    i = 0
                    for f, h in zip(newfreq, H):
                        x = np.cos(phis) * h * norm + self.mic_array.center[0, 0]
                        y = np.sin(phis) * h * norm + self.mic_array.center[1, 0]
                        ax.plot(x, y, '-', linewidth=0.5)

            # define some markers for different sources and colormap for damping
            markers = ['o', 's', 'v', '.']
            cmap = plt.get_cmap('YlGnBu')
            # draw the scatter of images
            for i, source in enumerate(self.sources):
                # draw source
                ax.scatter(
                    source.position[0],
                    source.position[1],
                    c=cmap(1.),
                    s=20,
                    marker=markers[i % len(markers)],
                    edgecolor=cmap(1.))

                # draw images
                if (img_order is None):
                    img_order = self.max_order

                I = source.orders <= img_order

                val = (np.log2(source.damping[I]) + 10.) / 10.
                # plot the images
                ax.scatter(source.images[0, I],
                    source.images[1, I],
                    c=cmap(val),
                    s=20,
                    marker=markers[i % len(markers)],
                    edgecolor=cmap(val))

            return fig, ax
            
        if self.dim == 3:

            import mpl_toolkits.mplot3d as a3
            import matplotlib.colors as colors
            import matplotlib.pyplot as plt
            import scipy as sp

            fig = plt.figure(figsize=figsize)
            ax = a3.Axes3D(fig)

            # plot the walls
            for w in self.walls:
                tri = a3.art3d.Poly3DCollection([w.corners.T], alpha=0.5)
                tri.set_color(colors.rgb2hex(sp.rand(3)))
                tri.set_edgecolor('k')
                ax.add_collection3d(tri)

            # define some markers for different sources and colormap for damping
            markers = ['o', 's', 'v', '.']
            cmap = plt.get_cmap('YlGnBu')
            # draw the scatter of images
            for i, source in enumerate(self.sources):
                # draw source
                ax.scatter(
                    source.position[0],
                    source.position[1],
                    source.position[2],
                    c=cmap(1.),
                    s=20,
                    marker=markers[i % len(markers)],
                    edgecolor=cmap(1.))

                # draw images
                if (img_order is None):
                    img_order = self.max_order

                I = source.orders <= img_order

                val = (np.log2(source.damping[I]) + 10.) / 10.
                # plot the images
                ax.scatter(source.images[0, I],
                    source.images[1, I],
                    source.images[2, I],
                    c=cmap(val),
                    s=20,
                    marker=markers[i % len(markers)],
                    edgecolor=cmap(val))


            # draw the microphones
            if (self.mic_array is not None):
                for mic in self.mic_array.R.T:
                    ax.scatter(mic[0], mic[1], mic[2],
                            marker='x', linewidth=0.5, s=mic_marker_size, c='k')


            return fig, ax
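A minimal, hypothetical driver for the plot method above, assuming it belongs to a pyroomacoustics-style Room class (the corner coordinates, source position, and image order below are made up for illustration):

import numpy as np
import pyroomacoustics as pra

corners = np.array([[0., 6., 6., 0.],
                    [0., 0., 4., 4.]])  # 2-D room, one column per corner
room = pra.Room.from_corners(corners, max_order=4)
room.add_source([2.0, 1.5])
room.image_source_model()   # populate the image sources drawn by plot()
fig, ax = room.plot(img_order=2)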
Example #38
0
import numpy
import scipy

from pyamg import gallery, rootnode_solver
from pyamg.gallery import stencil_grid
from pyamg.gallery.diffusion import diffusion_stencil_2d

from cvoutput import *
from convergence_tools import print_cycle_history

##
# Run Rotated Anisotropic Diffusion
n = 10
nx = n
ny = n
stencil = diffusion_stencil_2d(type='FE',epsilon=0.001,theta=scipy.pi/3)
A = stencil_grid(stencil, (nx,ny), format='csr')
numpy.random.seed(625)
x = scipy.rand(A.shape[0])
b = A*scipy.rand(A.shape[0])

ml = rootnode_solver(A, strength=('evolution', {'epsilon':2.0}), 
                    smooth=('energy', {'degree':2}), max_coarse=10)
resvec = []
x = ml.solve(b, x0=x, maxiter=20, tol=1e-14, residuals=resvec)
print_cycle_history(resvec, ml, verbose=True, plotting=False)

##
# Write ConnectionViewer files for multilevel hierarchy ml
xV,yV = numpy.meshgrid(numpy.arange(0,ny,dtype=float),numpy.arange(0,nx,dtype=float))
Verts = numpy.concatenate([[xV.ravel()],[yV.ravel()]],axis=0).T
outputML("test", Verts, ml)

Example #39
0
               c='r',
               marker='o')
    ax.scatter(np.asarray([x[maxx][0]]),
               np.asarray([y[0][maxy]]),
               np.asarray([maxz + 0.2]),
               c='b',
               marker='D')
    #    ax.text(x[maxx][0],y[0][maxy],maxz+0.2,"Goal",'y')
    #    for vtx in [ sp.rand(4,3)*2 + 2 for i in range(10) ] :
    for vtx in np.asarray(cube()) * np.asarray([[1, 1, 2], [1, 1, 2],
                                                [1, 1, 2], [1, 1, 2]]) + 2:
        tri = a3.art3d.Poly3DCollection([vtx],
                                        facecolors='w',
                                        linewidths=1,
                                        alpha=0.5)
        tri.set_color(colors.rgb2hex(sp.rand(3)))
        tri.set_edgecolor('k')
        ax.add_collection3d(tri)

    surf = ax.plot_surface(x,
                           y,
                           z,
                           rstride=1,
                           cstride=1,
                           linewidth=0,
                           antialiased=False,
                           facecolors=rgb)
    plt.savefig(frameLocation + 'frame.png', bbox_inches='tight')
    movie_file(frameLocation + 'frame.png')
    plt.close(1)
Example #40
0
frame_ix = 1000
frame = dlc.get_frame(Vid, frame_ix)
dlc.plot_frame(frame, axes=axes)

# plot all traj in selection
for i in tqdm(SDf.index):
    TrialDf = TrialDfs[i]
    Df = bhv.event_slice(TrialDf,'PRESENT_INTERVAL_STATE','ITI_STATE')
    t_on = Df.iloc[0]['t']
    t_off = Df.iloc[-1]['t']

    # trial by trial colors
    bp_cols = {}
    cmaps = dict(zip(bodyparts,['viridis','magma']))
    for bp in bodyparts:
        c = sns.color_palette(cmaps[bp],as_cmap=True)(sp.rand())
        bp_cols[bp] = c

    # marker for the start
    # frame_ix = dlc.time2frame(t_on, m, b, m2, b2)
    frame_ix = Sync.convert(t_on, 'arduino', 'dlc').round().astype('int')
    dlc.plot_bodyparts(bodyparts, DlcDf, frame_ix, colors=bp_cols, axes=axes, markersize=5)

    # the trajectory
    DlcDfSlice = bhv.time_slice(DlcDf, t_on, t_off)
    dlc.plot_trajectories(DlcDfSlice, bodyparts, colors=bp_cols, axes=axes, lw=0.75, alpha=0.75, p=0.8)

# %% 
"""
 
 ##     ## #### ########  ########  #######  
Example #41
0
def discretisation(eigen_vec):
    eps = 2.2204e-16

    # normalize the eigenvectors
    [n, k] = shape(eigen_vec)
    vm = kron(ones((1, k)), sqrt(multiply(eigen_vec, eigen_vec).sum(1)))
    eigen_vec = divide(eigen_vec, vm)

    svd_restarts = 0
    exitLoop = 0

    ### if there is an exception we try to randomize and rerun SVD again
    ### do this 30 times
    while (svd_restarts < 30) and (exitLoop == 0):

        # initialize algorithm with a random ordering of eigenvectors
        c = zeros((n, 1))
        R = matrix(zeros((k, k)))
        R[:, 0] = eigen_vec[int(rand(1) * (n - 1)), :].transpose()

        for j in range(1, k):
            c = c + abs(eigen_vec * R[:, j - 1])
            R[:, j] = eigen_vec[c.argmin(), :].transpose()

        lastObjectiveValue = 0
        nbIterationsDiscretisation = 0
        nbIterationsDiscretisationMax = 20

        # iteratively rotate the discretised eigenvectors until they
        # are maximally similar to the input eigenvectors; this
        # converges when the objective changes between successive
        # solutions by less than eps, or when the maximum number of
        # iterations is reached
        while exitLoop == 0:
            nbIterationsDiscretisation = nbIterationsDiscretisation + 1

            # rotate the original eigen_vectors
            tDiscrete = eigen_vec * R

            # discretise the result by setting the max of each row=1 and
            # other values to 0
            j = reshape(asarray(tDiscrete.argmax(1)), n)
            eigenvec_discrete = csc_matrix((ones(len(j)),
                                            (range(0, n), array(j))),
                                           shape=(n, k))

            # calculate a rotation that brings the discrete eigenvectors
            # as close as possible to the original eigenvectors
            tSVD = eigenvec_discrete.transpose() * eigen_vec
            # catch a SVD convergence error and restart
            try:
                U, S, Vh = svd(tSVD)
            except LinAlgError:
                # catch exception and go back to the beginning of the loop
                print("SVD did not converge, randomizing and trying again",
                      file=sys.stderr)
                break

            # test for convergence
            NcutValue = 2 * (n - S.sum())
            if((abs(NcutValue-lastObjectiveValue) < eps ) or \
                      ( nbIterationsDiscretisation > \
                        nbIterationsDiscretisationMax )):
                exitLoop = 1
            else:
                # otherwise calculate rotation and continue
                lastObjectiveValue = NcutValue
                R = matrix(Vh).transpose() * matrix(U).transpose()

    if exitLoop == 0:
        raise SVDError("SVD did not converge after 30 retries")
    else:
        return eigenvec_discrete
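A hypothetical smoke test for discretisation. The imports below mirror the names the snippet assumes (it was written against star-imports from numpy and scipy.sparse), and SVDError is an exception class defined elsewhere in the original module:

import sys
from numpy import (shape, kron, ones, sqrt, multiply, divide, zeros,
                   matrix, reshape, asarray, array)
from numpy.random import rand
from numpy.linalg import svd, LinAlgError
from scipy.sparse import csc_matrix

eigen_vec = matrix(rand(100, 4))       # stand-in for graph Laplacian eigenvectors
discrete = discretisation(eigen_vec)   # one nonzero per row marks the cluster
labels = asarray(discrete.argmax(axis=1)).ravel()
print(labels[:10])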
Example #42
0
    def test_nonhermitian(self):
        # problem data
        data = load_example('helmholtz_2D')
        A = data['A'].tocsr()
        B = data['B']
        np.random.seed(625)
        x0 = sp.rand(A.shape[0]) + 1.0j * sp.rand(A.shape[0])
        b = A * sp.rand(A.shape[0]) + 1.0j * (A * sp.rand(A.shape[0]))
        # solver parameters
        smooth = ('energy', {'krylov': 'gmres'})
        SA_build_args = {
            'max_coarse': 25,
            'coarse_solver': 'pinv2',
            'symmetry': 'symmetric'
        }
        SA_solve_args = {'cycle': 'V', 'maxiter': 20, 'tol': 1e-8}
        strength = [('evolution', {'k': 2, 'epsilon': 2.0})]
        smoother = ('gauss_seidel_nr', {'sweep': 'symmetric', 'iterations': 1})
        # Construct solver with nonsymmetric parameters
        sa = rootnode_solver(A,
                             B=B,
                             smooth=smooth,
                             strength=strength,
                             presmoother=smoother,
                             postsmoother=smoother,
                             **SA_build_args)
        residuals = []
        # stand-alone solve
        x = sa.solve(b, x0=x0, residuals=residuals, **SA_solve_args)
        residuals = np.array(residuals)
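        # geometric mean of the per-iteration residual reduction factor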
        avg_convergence_ratio =\
            (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
        # print "Test 3  %1.3e,  %1.3e" % (avg_convergence_ratio, 0.92)
        assert (avg_convergence_ratio < 0.92)
        # accelerated solve
        residuals = []
        x = sa.solve(b,
                     x0=x0,
                     residuals=residuals,
                     accel='gmres',
                     **SA_solve_args)
        del x
        residuals = np.array(residuals)
        avg_convergence_ratio =\
            (residuals[-1] / residuals[0]) ** (1.0 / len(residuals))
        # print "Test 4  %1.3e,  %1.3e" % (avg_convergence_ratio, 0.8)
        assert (avg_convergence_ratio < 0.8)

        # test that nonsymmetric parameters give the same result as symmetric
        # parameters for the complex-symmetric matrix A
        strength = 'symmetric'
        SA_build_args['symmetry'] = 'nonsymmetric'
        sa_nonsymm = rootnode_solver(A,
                                     B=np.ones((A.shape[0], 1)),
                                     smooth=smooth,
                                     strength=strength,
                                     presmoother=smoother,
                                     postsmoother=smoother,
                                     improve_candidates=None,
                                     **SA_build_args)
        SA_build_args['symmetry'] = 'symmetric'
        sa_symm = rootnode_solver(A,
                                  B=np.ones((A.shape[0], 1)),
                                  smooth=smooth,
                                  strength=strength,
                                  presmoother=smoother,
                                  postsmoother=smoother,
                                  improve_candidates=None,
                                  **SA_build_args)
        for (symm_lvl, nonsymm_lvl) in zip(sa_nonsymm.levels, sa_symm.levels):
            assert_array_almost_equal(symm_lvl.A.todense(),
                                      nonsymm_lvl.A.todense())
Example #43
0
def rand_super():
    h_5 = rand_herm(5)
    return propagator(h_5, scipy.rand(), [
        create(5), destroy(5), jmat(2, 'z')
    ])
Example #44
0
def run_example(with_plots=True):
    r"""
    Example to demonstrate the use of the Sundials solver Kinsol with
    a user-provided Jacobian and a preconditioner. The example is
    'Problem 4' from the book by Saad:
    Iterative Methods for Sparse Linear Systems.
    """
    #Read the original matrix
    A_original = IO.mmread(os.path.join(file_path, "kinsol_ors_matrix.mtx"))

    #Scale the original matrix
    A = SPARSE.spdiags(1.0 / A_original.diagonal(), 0,
                       len(A_original.diagonal()), len(
                           A_original.diagonal())) * A_original

    #Preconditioning by Symmetric Gauss Seidel
    if True:
        D = SPARSE.spdiags(A.diagonal(), 0, len(A_original.diagonal()),
                           len(A_original.diagonal()))
        Dinv = SPARSE.spdiags(1.0 / A.diagonal(), 0,
                              len(A_original.diagonal()),
                              len(A_original.diagonal()))
        E = -SPARSE.tril(A, k=-1)
        F = -SPARSE.triu(A, k=1)
        L = (D - E).dot(Dinv)
        U = D - F
        Prec = L.dot(U)

        solvePrec = LINSP.factorized(Prec)

    #Create the RHS
    b = A.dot(N.ones((A.shape[0], 1)))

    #Define the res
    def res(x):
        return A.dot(x.reshape(len(x), 1)) - b

    #The Jacobian
    def jac(x):
        return A.todense()

    #The Jacobian*Vector
    def jacv(x, v):
        return A.dot(v.reshape(len(v), 1))

    def prec_setup(u, f, uscale, fscale):
        pass

    def prec_solve(r):
        return solvePrec(r)

    y0 = S.rand(A.shape[0])

    #Define an Assimulo problem
    alg_mod = Algebraic_Problem(res,
                                y0=y0,
                                jac=jac,
                                jacv=jacv,
                                name='ORS Example')
    alg_mod_prec = Algebraic_Problem(res,
                                     y0=y0,
                                     jac=jac,
                                     jacv=jacv,
                                     prec_solve=prec_solve,
                                     prec_setup=prec_setup,
                                     name='ORS Example (Preconditioned)')

    #Define the KINSOL solver
    alg_solver = KINSOL(alg_mod)
    alg_solver_prec = KINSOL(alg_mod_prec)

    #Sets the parameters
    def setup_param(solver):
        solver.linear_solver = "spgmr"
        solver.max_dim_krylov_subspace = 10
        solver.ftol = LIN.norm(res(solver.y0)) * 1e-9
        solver.max_iter = 300
        solver.verbosity = 10
        solver.globalization_strategy = "none"

    setup_param(alg_solver)
    setup_param(alg_solver_prec)

    #Solve original system
    y = alg_solver.solve()

    #Solve preconditioned system
    y_prec = alg_solver_prec.solve()

    print("Error                 , in y: ", LIN.norm(y - N.ones(len(y))))
    print("Error (preconditioned), in y: ",
          LIN.norm(y_prec - N.ones(len(y_prec))))

    if with_plots:
        import pylab as P
        P.figure(4)
        P.semilogy(alg_solver.get_residual_norm_nonlinear_iterations(),
                   label="Original")
        P.semilogy(alg_solver_prec.get_residual_norm_nonlinear_iterations(),
                   label='Preconditioned')
        P.xlabel("Number of Iterations")
        P.ylabel("Residual Norm")
        P.title("Solution Progress")
        P.legend()
        P.grid()

        P.figure(5)
        P.plot(y, label="Original")
        P.plot(y_prec, label="Preconditioned")
        P.legend()
        P.grid()

        P.show()

    #Basic test
    for j in range(len(y)):
        nose.tools.assert_almost_equal(y[j], 1.0, 4)

    return [alg_mod, alg_mod_prec], [alg_solver, alg_solver_prec]
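A rough sanity check on the symmetric Gauss-Seidel preconditioner assembled above, M = (D - E) D^{-1} (D - F) with -E and -F the strict lower and upper triangles of A: on a small diagonally dominant random matrix, M^{-1}A should be far better conditioned than A. All names below are illustrative:

import numpy as np
import scipy.sparse as SPARSE

np.random.seed(0)
n = 50
A = SPARSE.rand(n, n, density=0.1, format='csr') + n * SPARSE.eye(n)
D = SPARSE.spdiags(A.diagonal(), 0, n, n)
Dinv = SPARSE.spdiags(1.0 / A.diagonal(), 0, n, n)
E = -SPARSE.tril(A, k=-1)
F = -SPARSE.triu(A, k=1)
Prec = ((D - E).dot(Dinv)).dot(D - F)   # symmetric Gauss-Seidel M

MinvA = np.linalg.solve(Prec.todense(), A.todense())
print(np.linalg.cond(A.todense()))   # unpreconditioned
print(np.linalg.cond(MinvA))         # should be much closer to 1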
Example #45
0
File: help.py Project: predmach/DeFiNe
def random_spanning_tree(gg):
    for u, v, d in gg.edges(data=True):
        d['rand']=1.0*sp.rand()
    return nx.minimum_spanning_tree(gg, weight='rand')
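A hypothetical usage sketch. Note that taking the minimum spanning tree under i.i.d. uniform edge weights is a cheap way to draw a random spanning tree, but the result is not uniformly distributed over all spanning trees; sp.rand also dates from when SciPy re-exported numpy.random.rand:

import networkx as nx

gg = nx.grid_2d_graph(4, 4)          # any connected graph will do
tree = random_spanning_tree(gg)
assert nx.is_tree(tree)
assert tree.number_of_nodes() == gg.number_of_nodes()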
Example #46
0
def plot_labels(lbl: scipy.ndarray, lbl_count: int) -> None:
    """Shows a plot of the given label image with a random color map."""
    color_map = scipy.rand(lbl_count, 3)
    color_map = matplotlib.colors.ListedColormap(color_map)
    plt.imshow(lbl, cmap=color_map)
    plt.show()
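A hypothetical driver pairing plot_labels with scipy.ndimage.label (this assumes an older SciPy where scipy.rand, used inside the function, still exists):

import scipy
import scipy.ndimage
import matplotlib.colors
import matplotlib.pyplot as plt

mask = scipy.rand(64, 64) > 0.7        # random binary image
lbl, lbl_count = scipy.ndimage.label(mask)
plot_labels(lbl, lbl_count + 1)        # +1 so background label 0 gets a color too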
Example #47
0
def generic(geometry, func, seeds='pore.seed', **kwargs):
    if seeds not in geometry:
        geometry['pore.seed'] = _sp.rand(geometry.Np, )
    return _misc.generic(geometry=geometry, func=func, seeds=seeds)
Example #48
0
    def invert(self, rdn_meas, geom, out=None, init=None):
        """Inverts a meaurement. Returns an array of state vector samples.
           Similar to Inversion.invert() but returns a list of samples."""

        # We will truncate non-surface parameters to their bounds, but leave
        # Surface reflectance unconstrained so it can dip slightly below zero
        # in a channel without invalidating the whole vector
        bounds = s.array([self.fm.bounds[0].copy(), self.fm.bounds[1].copy()])
        bounds[:, self.fm.surface_inds] = s.array([[-s.inf], [s.inf]])

        # Initialize to conjugate gradient solution
        x_MAP = Inversion.invert(self, rdn_meas, geom, out, init)

        # Proposal is based on the posterior uncertainty
        S_hat, K, G = self.calc_posterior(x_MAP, geom, rdn_meas)
        proposal_Cov = S_hat * self.proposal_scaling
        proposal = multivariate_normal(cov=proposal_Cov)

        # We will use this routine for initializing
        def initialize():
            x = multivariate_normal(mean=x_MAP, cov=S_hat).rvs()
            too_low = x < bounds[0]
            x[too_low] = bounds[0][too_low]+eps
            too_high = x > bounds[1]
            x[too_high] = bounds[1][too_high]-eps
            dens = self.log_density(x, rdn_meas, geom, bounds)
            return x, dens

        # Sample from the posterior using Metropolis/Hastings MCMC
        samples, acpts, rejs, x = [], 0, 0, None
        for i in range(self.iterations):

            if i % self.restart_every == 0:
                x, dens = initialize()

            xp = x + proposal.rvs()
            dens_new = self.log_density(xp, rdn_meas, geom, bounds=bounds)

            # Test vs. the Metropolis / Hastings criterion
            if s.isfinite(dens_new) and\
                    s.log(s.rand()) <= min((dens_new - dens, 0.0)):
                x = xp
                dens = dens_new
                acpts = acpts + 1
                if self.verbose:
                    print('%8.5e %8.5e ACCEPT! rate %4.2f' %
                          (dens, dens_new, s.mean(acpts/(acpts+rejs))))
            else:
                rejs = rejs + 1
                if self.verbose:
                    print('%8.5e %8.5e REJECT  rate %4.2f' %
                          (dens, dens_new, s.mean(acpts/(acpts+rejs))))

            # Make sure we have not wandered off the map
            if not s.isfinite(dens_new):
                x, dens = initialize()

            if i % self.restart_every < self.burnin:
                samples.append(x)

        return x_MAP.copy(), s.array(samples)
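The acceptance test above, log u <= min(log p(x') - log p(x), 0) with u ~ Uniform(0, 1), is the standard Metropolis criterion evaluated in log space. A minimal self-contained sketch of the same loop on a toy 1-D density (all names illustrative):

import numpy as np

def log_density(x):
    return -0.5 * x ** 2                    # unnormalized standard normal

rng = np.random.default_rng(0)
x, dens = 0.0, log_density(0.0)
samples = []
for i in range(5000):
    xp = x + rng.normal(scale=1.0)          # symmetric random-walk proposal
    dens_new = log_density(xp)
    if np.log(rng.random()) <= min(dens_new - dens, 0.0):
        x, dens = xp, dens_new              # accept the move
    samples.append(x)

print(np.mean(samples), np.std(samples))    # should be near 0 and 1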
Example #49
0
a2009s.32k_fs.reduce3.very_smooth.left.dfs'))

# view_patch_vtk(dfs_left_sm)
rho_rho = []
rho_all = []
#lst=lst[:1]
labs_all = sp.zeros((len(dfs_left.labels), len(lst)))
sub = lst[0]
data = scipy.io.loadmat(
    os.path.join(p_dir, sub, sub + '.rfMRI_REST1_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
LR_flag = msk['LR_flag']
LR_flag = np.squeeze(LR_flag) != 0
data = data['ftdata_NLM']
temp = data[LR_flag, :]
temp[5000:6000, 500:700] += sp.rand(1000, 200)  # temp[1000, :]
m = np.mean(temp, 1)
temp = temp - m[:, None]
s = np.std(temp, 1) + 1e-16
temp = temp / s[:, None]
d1 = temp

sub = lst[1]
data = scipy.io.loadmat(
    os.path.join(p_dir, sub, sub + '.rfMRI_REST1_LR.\
reduce3.ftdata.NLM_11N_hvar_25.mat'))
LR_flag = msk['LR_flag']
LR_flag = np.squeeze(LR_flag) != 0
data = data['ftdata_NLM']
temp = data[LR_flag, :]
m = np.mean(temp, 1)
Example #50
0
def normal(geometry, scale, loc, seeds='pore.seed', **kwargs):
    if seeds not in geometry:
        geometry['pore.seed'] = _sp.rand(geometry.Np, )
    return _misc.normal(geometry=geometry, scale=scale, loc=loc, seeds=seeds)
Example #51
0
import numpy
import scipy

from pyamg.gallery import load_example
from convergence_tools import print_cycle_history

if __name__ == '__main__':

    print "\nDiffusion problem discretized with p=5 and the local\n" + \
          "discontinuous Galerkin method."

    # Discontinuous Galerkin Diffusion Problem
    data = load_example('local_disc_galerkin_diffusion')
    A = data['A'].tocsr()
    B = data['B']
    elements = data['elements']
    vertices = data['vertices']
    numpy.random.seed(625)
    x0 = scipy.rand(A.shape[0])
    b = numpy.zeros_like(x0)

    ##
    # For demonstration, show that a naive SA solver
    # yields unsatisfactory convergence
    smooth = ('jacobi', {'filter': True})
    strength = ('symmetric', {'theta': 0.1})
    SA_solve_args = {'cycle': 'W', 'maxiter': 20, 'tol': 1e-8, 'accel': 'cg'}
    SA_build_args = {'max_levels': 10, 'max_coarse': 25, 'coarse_solver': 'pinv2',
                     'symmetry': 'hermitian', 'keep': True}
    presmoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 1})
    postsmoother = ('gauss_seidel', {'sweep': 'symmetric', 'iterations': 1})

    ##
    # Construct solver and solve
Example #52
0
File: split.py Project: lacrymose/pyamg
def preprocess(S, coloring_method=None):
    """Common preprocess for splitting functions

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix
    coloring_method : string, optional
        Algorithm used to compute the vertex coloring:
            * 'MIS' - Maximal Independent Set
            * 'JP'  - Jones-Plassmann (parallel)
            * 'LDF' - Largest-Degree-First (parallel)

    Returns
    -------
    weights: ndarray
        Weights from a graph coloring of G
    S : csr_matrix
        Strength matrix with ones
    T : csr_matrix
        transpose of S
    G : csr_matrix
        union of S and T

    Notes
    -----
    Performs the following operations:
        - Checks input strength of connection matrix S
        - Replaces S.data with ones
        - Creates T = S.T in CSR format
        - Creates G = S union T in CSR format
        - Creates random weights
        - Augments weights with graph coloring (if coloring_method is given)

    """

    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')

    if S.shape[0] != S.shape[1]:
        raise ValueError('expected square matrix, shape=%s' % (S.shape, ))

    N = S.shape[0]
    S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
                   shape=(N, N))
    T = S.T.tocsr()  # transpose S for efficient column access

    G = S + T  # form graph (must be symmetric)
    G.data[:] = 1

    weights = np.ravel(T.sum(axis=1))  # initial weights
    # weights -= T.diagonal()          # discount self loops

    if coloring_method is None:
        weights = weights + sp.rand(len(weights))
    else:
        coloring = vertex_coloring(G, coloring_method)
        num_colors = coloring.max() + 1
        weights = weights + (sp.rand(len(weights)) + coloring) / num_colors

    return (weights, G, S, T)
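A hypothetical call on a small strength matrix, using pyamg's Poisson gallery problem as a stand-in, just to show the shapes coming back (this assumes an older SciPy where sp.rand, used inside the function, still exists):

from pyamg.gallery import poisson

S = poisson((10, 10), format='csr')     # stand-in strength-of-connection matrix
weights, G, S1, T = preprocess(S, coloring_method=None)
print(weights.shape, G.nnz, S1.nnz, T.nnz)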
Example #53
0
    def test_Mooney(self):
        A10, A01, kappa, rho = sp.rand(4) * 1E3 + 100
        print('Material parameters A10, A01 and kappa:', A10, A01, kappa)
        my_material = MooneyRivlin(A10, A01, kappa, rho)
        self.my_element.material = my_material
        self.jacobi_test_element(rtol=1E-3)
Example #54
0
    def test_Mooney(self):
        A10, A01, kappa, rho = sp.rand(4) * 1E3 + 100
        my_material = MooneyRivlin(A10, A01, kappa, rho)
        self.my_element.material = my_material
        self.jacobi_test_element(rtol=5E-4)
Example #55
0
    def solve(self, wls):
        """Isotropic solver.

        INPUT
        wls = wavelengths to scan (any asarray-able object).

        OUTPUT
        self.DE1, self.DE3 = power reflected and transmitted.

        NOTE
        see:
        Moharam, "Formulation for stable and efficient implementation
        of the rigorous coupled-wave analysis of binary gratings",
        JOSA A, 12(5), 1995
        Lalanne, "Highly improved convergence of the coupled-wave
        method for TM polarization", JOSA A, 13(4), 1996
        Moharam, "Stable implementation of the rigorous coupled-wave
        analysis for surface-relief gratings: enhanced transmittance
        matrix approach", JOSA A, 12(5), 1995
        """

        self.wls = S.atleast_1d(wls)

        LAMBDA = self.LAMBDA
        n = self.n
        multilayer = self.multilayer
        alpha = self.alpha
        delta = self.delta
        psi = self.psi
        phi = self.phi

        nlayers = len(multilayer)
        i = S.arange(-n, n + 1)
        nood = 2 * n + 1
        hmax = nood - 1

        # grating vector (in the xz plane); the grating itself lies in the xy plane
        K = 2 * pi / LAMBDA * \
            S.array([S.sin(phi), 0., S.cos(phi)], dtype=complex)

        DE1 = S.zeros((nood, self.wls.size))
        DE3 = S.zeros_like(DE1)

        dirk1 = S.array([S.sin(alpha) * S.cos(delta),
                         S.sin(alpha) * S.sin(delta),
                         S.cos(alpha)])

        # useful matrices
        I = S.eye(i.size)
        I2 = S.eye(i.size * 2)
        ZERO = S.zeros_like(I)

        X = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp1 = S.zeros((2 * nood, 2 * nood, nlayers), dtype=complex)
        MTp2 = S.zeros_like(MTp1)

        EPS2 = S.zeros(2 * hmax + 1, dtype=complex)
        EPS21 = S.zeros_like(EPS2)

        dlt = (i == 0).astype(int)  # Kronecker delta selecting the zeroth order

        for iwl, wl in enumerate(self.wls):

            # free space wavevector
            k = 2 * pi / wl

            n1 = multilayer[0].mat.n(wl).item()
            n3 = multilayer[-1].mat.n(wl).item()

            # incident plane wave wavevector
            k1 = k * n1 * dirk1

            # all the other wavevectors
            tmp_x = k1[0] - i * K[0]
            tmp_y = k1[1] * S.ones_like(i)
            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n1)
            k1i = S.r_[[tmp_x], [tmp_y], [tmp_z]]

            # k2i = S.r_[[k1[0] - i*K[0]], [k1[1] - i * K[1]], [-i * K[2]]]

            tmp_z = dispersion_relation_ordinary(tmp_x, tmp_y, k, n3)
            k3i = S.r_[[k1i[0, :]], [k1i[1, :]], [tmp_z]]

            # aliases for constant wavevectors
            kx = k1i[0, :]
            ky = k1[1]

            # angles of reflection
            # phi_i = S.arctan2(ky,kx)
            phi_i = S.arctan2(ky, kx.real)  # NB: use only the real part of kx

            Kx = S.diag(kx / k)
            Ky = ky / k * I
            Z1 = S.diag(k1i[2, :] / (k * n1 ** 2))
            Y1 = S.diag(k1i[2, :] / k)
            Z3 = S.diag(k3i[2, :] / (k * n3 ** 2))
            Y3 = S.diag(k3i[2, :] / k)
            # Fc = S.diag(S.cos(phi_i))
            fc = S.cos(phi_i)
            # Fs = S.diag(S.sin(phi_i))
            fs = S.sin(phi_i)

            MR = S.asarray(S.bmat([[I, ZERO],
                                   [-1j * Y1, ZERO],
                                   [ZERO, I],
                                   [ZERO, -1j * Z1]]))

            MT = S.asarray(S.bmat([[I, ZERO],
                                   [1j * Y3, ZERO],
                                   [ZERO, I],
                                   [ZERO, 1j * Z3]]))

            # internal layers (grating or layer)
            X.fill(0.0)
            MTp1.fill(0.0)
            MTp2.fill(0.0)
            for nlayer in range(nlayers - 2, 0, -1):  # internal layers

                layer = multilayer[nlayer]
                d = layer.thickness

                EPS2, EPS21 = layer.getEPSFourierCoeffs(
                    wl, n, anisotropic=False)

                E = toeplitz(EPS2[hmax::-1], EPS2[hmax:])
                E1 = toeplitz(EPS21[hmax::-1], EPS21[hmax:])
                E11 = inv(E1)
                # B = S.dot(Kx, linsolve(E,Kx)) - I
                B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                # A = S.dot(Kx, Kx) - E
                A = S.diag((kx / k) ** 2) - E

                # Note: workaround for a known issue (alfredo's bug fix):
                # randomly perturb Kx until cond(A) is small (< 1e10).
                # A dirty fix... :-(
                # For certain kx the Helmholtz operator has two null
                # eigenvalues and A, B are not invertible --> nudge kx
                # slightly; these cases should really be handled
                # separately (analytically).
                if cond(A) > 1e10:
                    warning('BAD CONDITIONING: randomization of kx')
                    while cond(A) > 1e10:
                        Kx = Kx * (1 + 1e-9 * S.rand())
                        B = kx[:, S.newaxis] / k * linsolve(E, Kx) - I
                        A = S.diag((kx / k) ** 2) - E

                if S.absolute(K[2] / k) > 1e-10:

                    raise ValueError(
                        'First Order Helmholtz Operator not implemented, yet!')

                elif ky == 0 or S.allclose(S.diag(Ky / ky * k), 1):

                    # lalanne
                    # H_U_reduced = S.dot(Ky, Ky) + A
                    H_U_reduced = (ky / k) ** 2 * I + A
                    # H_S_reduced = S.dot(Ky, Ky) + S.dot(Kx, linsolve(E, S.dot(Kx, E11))) - E11
                    H_S_reduced = (ky / k) ** 2 * I + kx[:, S.newaxis] / k * linsolve(E,
                                                                                      kx[:, S.newaxis] / k * E11) - E11

                    q1, W1 = eig(H_U_reduced)
                    q1 = S.sqrt(q1)
                    q2, W2 = eig(H_S_reduced)
                    q2 = S.sqrt(q2)

                    # boundary conditions

                    # V11 = S.dot(linsolve(A, W1), S.diag(q1))
                    V11 = linsolve(A, W1) * q1[S.newaxis, :]
                    V12 = (ky / k) * S.dot(linsolve(A, Kx), W2)
                    V21 = (ky / k) * S.dot(linsolve(B, Kx), linsolve(E, W1))
                    # V22 = S.dot(linsolve(B, W2), S.diag(q2))
                    V22 = linsolve(B, W2) * q2[S.newaxis, :]

                    # Vss = S.dot(Fc, V11)
                    Vss = fc[:, S.newaxis] * V11
                    # Wss = S.dot(Fc, W1)  + S.dot(Fs, V21)
                    Wss = fc[:, S.newaxis] * W1 + fs[:, S.newaxis] * V21
                    # Vsp = S.dot(Fc, V12) - S.dot(Fs, W2)
                    Vsp = fc[:, S.newaxis] * V12 - fs[:, S.newaxis] * W2
                    # Wsp = S.dot(Fs, V22)
                    Wsp = fs[:, S.newaxis] * V22
                    # Wpp = S.dot(Fc, V22)
                    Wpp = fc[:, S.newaxis] * V22
                    # Vpp = S.dot(Fc, W2)  + S.dot(Fs, V12)
                    Vpp = fc[:, S.newaxis] * W2 + fs[:, S.newaxis] * V12
                    # Wps = S.dot(Fc, V21) - S.dot(Fs, W1)
                    Wps = fc[:, S.newaxis] * V21 - fs[:, S.newaxis] * W1
                    # Vps = S.dot(Fs, V11)
                    Vps = fs[:, S.newaxis] * V11

                    Mc2bar = S.asarray(S.bmat([[Vss, Vsp, Vss, Vsp],
                                               [Wss, Wsp, -Wss, -Wsp],
                                               [Wps, Wpp, -Wps, -Wpp],
                                               [Vps, Vpp, Vps, Vpp]]))

                    x = S.r_[S.exp(-k * q1 * d), S.exp(-k * q2 * d)]

                    # Mc1 = S.dot(Mc2bar, S.diag(S.r_[S.ones_like(x), x]))
                    xx = S.r_[S.ones_like(x), x]
                    Mc1 = Mc2bar * xx[S.newaxis, :]

                    X[:, :, nlayer] = S.diag(x)

                    MTp = linsolve(Mc2bar, MT)
                    MTp1[:, :, nlayer] = MTp[0:2 * nood, :]
                    MTp2 = MTp[2 * nood:, :]

                    MT = S.dot(
                        Mc1, S.r_[
                            I2, S.dot(
                                MTp2, linsolve(
                                    MTp1[
                                        :, :, nlayer], X[
                                        :, :, nlayer]))])

                else:

                    raise ValueError(
                        'Second Order Helmholtz Operator not implemented, yet!')

            # M = S.asarray(S.bmat([-MR, MT]))
            M = S.c_[-MR, MT]
            b = S.r_[S.sin(psi) * dlt,
                     1j * S.sin(psi) * n1 * S.cos(alpha) * dlt,
                     -1j * S.cos(psi) * n1 * dlt,
                     S.cos(psi) * S.cos(alpha) * dlt]

            x = linsolve(M, b)
            R, T = S.split(x, 2)
            Rs, Rp = S.split(R, 2)
            for ii in range(1, nlayers - 1):
                T = S.dot(linsolve(MTp1[:, :, ii], X[:, :, ii]), T)
            Ts, Tp = S.split(T, 2)

            DE1[:, iwl] = (k1i[2, :] / (k1[2])).real * S.absolute(Rs) ** 2 + \
                          (k1i[2, :] / (k1[2] * n1 ** 2)).real * \
                S.absolute(Rp) ** 2
            DE3[:, iwl] = (k3i[2, :] / (k1[2])).real * S.absolute(Ts) ** 2 + \
                          (k3i[2, :] / (k1[2] * n3 ** 2)).real * \
                S.absolute(Tp) ** 2

        # save the results
        self.DE1 = DE1
        self.DE3 = DE3

        return self
Example #56
0
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import sys
import time

# from pylab import load as L  (only needed for the commented-out CSV load below)
import numpy as N
from scipy import rand

test_data = rand(10, 2)

my_data = [['00', '01', '02'], ['10', '11', '12'], ['20', '21', '22'],
           ['30', '31', '32']]
#my_data = L('Z.csv', delimiter = ',')


def main():
    app = QApplication(sys.argv)
    w = DBTable()
    #w.show()
    sys.exit(app.exec_())


class DBTable(QWidget):

    def __init__(self, dataList=None, colHeaderList=None):
        QWidget.__init__(self)
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setWindowTitle('Database Result')
Example #57
0
# -*- coding: utf-8 -*-
"""
Created on Thu Jan  4 14:01:02 2018

@author : Alexis Martin
"""
import scipy
import numpy as np
import time
import PartitionData as part
import matplotlib.pyplot as plt

nData = 1000000
dim = 100
nNodes = 50
X = scipy.rand(nData, dim)
# print("X created")
ind = np.random.choice(nData, nNodes)
# print("ind created")
NodePositions = X[ind, :]
# print("NodePositions created")
XSquared = (X ** 2).sum(axis=1).reshape(nData, 1)
# print("XSquared created")
print("Start")
t = time.time()
partition, dists = part.PartitionData(X, NodePositions, 100000, XSquared)
print(time.time() - t, "seconds")

if nData < 100000 and dim == 2:
    A = [None] * nNodes
    color = ['c', 'r', 'b', 'g', 'y', 'm', 'k']
Example #58
0
def gen_data(N=100, P=4):
    f = 20
    G = 1. * (sp.rand(N, f) < 0.2)
    F = sp.rand(N, 2)
    Y = sp.randn(N, P)
    return Y, F, G
Example #59
0
    # a[0] = -a[0]

    # write

    print(type(a))
    # a[:] = np.resize(np.array([[1,2],[3,255]]),(4,)).astype('uint8')
    a[1:2] = np.array([255]).astype('uint8')
    print(sum(a))


if __name__ == '__main__':
    # Create the array
    N = 4
    unshared_arr = scipy.rand(N).astype('uint8')
    a = Array(c_ubyte, np.zeros((4, ), dtype=np.uint8))
    print(a[:])

    # Create, start, and finish the child process
    p = Process(target=f, args=(a, ))
    p.start()
    p.join()

    # Print out the changed values
    print(a[:])

    # b = np.frombuffer(a.get_obj(),dtype=np.uint8)            # same address in memory
    # b = np.array(a)                             # copy
    b = a[:]
    b[0] = 10
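    # note: a[:] copied the data out of the shared Array, so the write above
    # does not modify the buffer seen by the child process; use
    # np.frombuffer(a.get_obj(), dtype=np.uint8) for a shared view instead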
Example #60
0
    print('-')
    # data += [[output[6],output[7],output[8],output[9],output[10],output[11]]]
    fs_dyn = []
    for j in sorted(FSNet.activation.keys()):
        if j > (dim - 1):
            fs_dyn += [FSNet.activation[j]]
    data += [fs_dyn]
    #goalFS.append([FSNet.activation[12],FSNet.mismatch[12],FSNet.net[12].isActive,FSNet.net[12].failed])
    goalsDyn.append(goalsReached)
    NFSDyn.append(len(FSNet.net.keys()))
    if (currState == goal):
        if (oldState != goal):
            goalsReached += 1
            # break
        #        if (len(FSNet.failedFS)==0 and (np.rand() < 0.2)):# and (len(FSNet.activatedFS)==dim):
        if np.random.rand() < 0.2:
            currState = start[:]
            FSNet.resetActivity()
            print(currState, start)
    if len(FSNet.matchedFS) > 0 and drawFSNet:
        plt.figure(num=('t:' + str(t)))
        plt.subplots_adjust(left=0.02, right=0.98, top=1., bottom=0.0)
        viz.drawNet(FSNet.net)

plt.figure()
plt.subplot(3, 1, 1)
plt.pcolor(np.asarray(list(zip(*data))))
plt.title('out FS dynamics')
#plt.figure()
plt.subplot(3, 1, 2)
plt.plot(goalsDyn)