Example #1
    def _dfunc(self):
        N = self.N
        
        self._createMatrices()
        
        Abar = self.A + self.A.T
        Bbar = self.B + self.B.T
        Dbar = self.D + self.D.T
        Fbar = self.F + self.F.T
        Ainv = linalg.inv(Abar)
        
        A = array(bmat([[zeros((N,N)), eye(N)],
                        [-dot(Ainv, Bbar), -dot(Ainv, Fbar)]]))

        B = array(bmat([[zeros((N,N)), zeros((N,N))],
                        [-dot(Ainv, diag(self.E)), zeros((N,N))]]))

        C = array(bmat([[zeros((N,N)), zeros((N,N))],
                        [-dot(Ainv, Dbar), zeros((N,N))]]))

        D = hstack((zeros(N), -dot(Ainv, self.C)))

        def diffMat(y):
            yy = vstack((y,)*N)
            return yy.T - yy
        
        return lambda y, t: dot(A, y) + dot(B, sin(y)) + D
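Example #2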
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.
    
    `a` and `b` are (m, 1) and (n, 1) column vectors, so `a * b^T` is a rank-1
    update; svdUpdate can thus simulate incremental addition of one new
    document and/or term to an already existing decomposition.
    """
    rank = U.shape[1]
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = (1.0 / float(Ra)) * p
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = (1.0 / float(Rb)) * q

    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat("U P") * tUp
    Vp = numpy.bmat("V Q") * tVp
    Sp = tSp
    return Up, Sp, Vp
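
A minimal sanity check for this routine (an assumption, not part of the original snippet): for an exactly rank-2 `X`, the truncated update should reproduce the best rank-2 approximation of `X + a * b^T` computed from a direct SVD.

import numpy

rng = numpy.random.RandomState(0)
X = numpy.matrix(rng.randn(5, 2)) * numpy.matrix(rng.randn(2, 4))  # exactly rank 2
u, s, vt = numpy.linalg.svd(X, full_matrices=False)
U, S, V = u[:, :2], numpy.matrix(numpy.diag(s[:2])), vt.T[:, :2]
a, b = numpy.matrix(rng.randn(5, 1)), numpy.matrix(rng.randn(4, 1))
Up, Sp, Vp = svdUpdate(U, S, V, a, b)
# compare against the best rank-2 approximation of the updated matrix
u2, s2, vt2 = numpy.linalg.svd(X + a * b.T, full_matrices=False)
print(numpy.allclose(Up * Sp * Vp.T, u2[:, :2] * numpy.diag(s2[:2]) * vt2[:2, :]))  # True
Example #3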
def crankNicolson(condInitialesPhi, condInitialesPsi, condSpatiales = None, tMax = 0.001, dt=10**-6, v = 1, dx = 1):

	if np.size(condInitialesPhi) != np.size(condInitialesPsi) :
		raise Exception("La taille de condInitialesPhi doit être semblable à condInitialesPsi")

	# Useful constants
	n = np.size(condInitialesPhi)
	k = -dt * v**2 / dx**2 / 2
	N = int(tMax / dt)

	# Matrix holding the system's evolution
	evolution = np.zeros((N+1,2*n))
	evolution[0,:n] = condInitialesPhi
	evolution[0,n:] = condInitialesPsi

	# Build the evolution matrix
	I = np.eye(n)
	A = np.tri(n, k = 1).T * np.tri(n, k=-1)
	A = (A + A.T - 2 * I) * k 
	M = np.array(np.bmat(((I, -dt*I/2),(A, I))))
	K = np.array(np.bmat(((I, dt*I/2),(-A, I))))
	invM = np.linalg.inv(M)
	matriceEvolution = np.dot(invM,K)	

	# Apply the spatial conditions: the listed points are held fixed in time.
	if condSpatiales is not None :
		matriceEvolution[condSpatiales] = np.zeros(2*n)
		matriceEvolution[condSpatiales, condSpatiales] = 1
		matriceEvolution[condSpatiales+n] = np.zeros(2*n)

	for i in range(1,N+1):
		evolution[i] = np.dot(matriceEvolution,evolution[i-1])

	return evolution[:,:n], evolution[:,n:]
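
A minimal usage sketch (the grid size and time step here are assumptions, chosen only to make the shapes concrete):

import numpy as np

x = np.linspace(0, 99, 100)
phi0 = np.exp(-(x - 50.0)**2 / 20.0)   # Gaussian pulse
psi0 = np.zeros(100)                   # initially at rest
phi, psi = crankNicolson(phi0, psi0, tMax=5e-5, dt=1e-6)
print(phi.shape)   # (51, 100): the initial state plus 50 time steps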
Example #4
    def _solve_KKT(H, A, g, ress, C_f, C_nfx):
        """Putting code to solve the KKT system in one place.

        Note: mbmat, col, zeros, extract and get_independent_rows appear to be
        helpers from the enclosing module; n, m and eps come from the
        enclosing scope.
        """
        # TODO fix it so the solve doesn't sometimes get errors?
        o = A.shape[0]
        K = mbmat([[ H,          A.T],
                   [ A, zeros[:o, :o]]])
        f = bmat([[        g],
                  [col(ress)]])
        xx = col(solve(K, f))
        p = - xx[:n]
        p = C_f.T * C_f * p
        mu = xx[n : n + m]

        if abs(K * xx - f).max() > 1e5 * eps:
            print('solve failed')
            rows_to_keep = get_independent_rows(A, 1e3*eps)
            ress = ress[rows_to_keep, 0]
            A = extract(A, rows_to_keep)
            o = A.shape[0]
            K = mbmat([[ H,          A.T],
                       [ A, zeros[:o, :o]]])
            f =  bmat([[         g],
                       [ col(ress)]])
            xx = col(solve(K, f))
            p = - xx[:n]
            p = C_f.T * C_f * p
            mu = xx[n : n + m]

        if abs(K * xx - f).max() > 1e5 * eps:
            print('solve failed')
            #raise Exception('Solve Still Failed!!!')

        return p, mu
Example #5
	def getValuesFromPose(self, P):
		'''return the virtual values of the pots corresponding to the pose P'''
		vals = []
		grads = []
		for i, r, l, placement, attach_p in zip(range(3), self.rs, self.ls, self.placements, self.attach_ps):
			#first pot axis
			a = placement.rot * col([1, 0, 0])
			#second pot axis
			b = placement.rot * col([0, 1, 0])
			#string axis
			c = placement.rot * col([0, 0, 1])

			#attach point on the joystick
			p_joystick = P * attach_p
			v = p_joystick - placement.trans
			va = v - dot(v, a)*a
			vb = v - dot(v, b)*b
			#angles of the pots
			alpha = math.atan2(dot(vb, a), dot(vb, c))
			beta = math.atan2(dot(va, b), dot(va, c))
			vals.append(alpha)
			vals.append(beta)
			
			#calculation of the derivatives
			dv = np.bmat([-P.rot.mat() * quat.skew(attach_p), P.rot.mat()])
			dva = (np.eye(3) - a*a.T) * dv
			dvb = (np.eye(3) - b*b.T) * dv
			dalpha = (1/dot(vb,vb)) * (dot(vb,c) * a.T - dot(vb,a) * c.T) * dvb
			dbeta = (1/dot(va,va)) * (dot(va,c) * b.T - dot(va,b) * c.T) * dva
			grads.append(dalpha)
			grads.append(dbeta)
		# stack the 1x6 gradient rows into a single Jacobian matrix
		return (col(vals), np.bmat([[g] for g in grads]))
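Example #6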
def hmc_step_stiefel(X0, log_pi, args=(), epsilon=.3, T=500):
    """
    Hamiltonian Monte Carlo for Stiefel manifolds.
    """
    n, d = X0.shape
    U = np.random.randn(*X0.shape)
    U = orth_stiefel_project(X0, U)
    log_pi0, G0 = log_pi(X0, *args)
    H0 = log_pi0 + .5 * np.einsum('ij,ij', U, U)
    X1 = X0.copy()
    G1 = G0
    for tau in range(T):
        U += 0.5 * epsilon * G1
        U = orth_stiefel_project(X0, U)
        A = np.dot(X1.T, U)
        S = np.dot(U.T, U)
        exptA = scipy.linalg.expm(-epsilon * A)
        tmp0 = np.bmat([X0, U])
        tmp1 = scipy.linalg.expm(epsilon * np.bmat([[A, -S], 
                                                     [np.eye(d), A]]))
        tmp2 = scipy.linalg.block_diag(exptA, exptA)
        tmp3 = np.dot(tmp0, np.dot(tmp1, tmp2))
        X1 = tmp3[:, :d]
        U = tmp3[:, d:]
        log_pi1, G1 = log_pi(X1, *args)
        U += 0.5 * epsilon * G1
        U = orth_stiefel_project(X0, U)
    H1 = log_pi1 + .5 * np.einsum('ij,ij', U, U)
    u = np.random.rand()
    if u < math.exp(-H1 + H0):
        return X1, 1, log_pi1
    return X0, 0, log_pi0
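
orth_stiefel_project is not shown in this snippet; a common choice, sketched here as an assumption, is the projection onto the tangent space of the Stiefel manifold at X:

import numpy as np

def orth_stiefel_project(X, U):
    # tangent-space projection at X: U - X * sym(X^T U)
    XtU = np.dot(X.T, U)
    return U - np.dot(X, (XtU + XtU.T) / 2.0)
Example #7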
    def update_mini_batch(self, mini_batch, eta, lmbda, spatial_regularization, n):
        """Update the network's weights and biases by applying gradient
        descent using backpropagation to a single mini batch.  The
        ``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter, and
        ``n`` is the total size of the training data set.

        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        if spatial_regularization:
            w_imj = np.bmat([self.weights[0][:,1:],np.zeros((len(self.weights[0]),1))]).A
            w_ipj = np.bmat([np.zeros((len(self.weights[0]),1)),self.weights[0][:,:-1]]).A
            for i in range(27,783,28):
                # zero the columns that would wrap around an image row edge;
                # assigning a (1, k) array into a 1-D column slice would fail
                w_imj[:,i] = 0.0
                w_ipj[:,i+1] = 0.0
            w_ijm = np.bmat([self.weights[0][:,28:],np.zeros((len(self.weights[0]),28))]).A
            w_ijp = np.bmat([np.zeros((len(self.weights[0]),28)),self.weights[0][:,:-28]]).A
            delta_e = 0.25*(w_imj+w_ipj+w_ijm+w_ijp)
            self.weights[0] = self.weights[0]+eta*(lmbda/n)*delta_e # regularization by edges
        self.biases = [b-(eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]
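
The regularization above pulls each first-layer weight toward the average of its four pixel neighbors; the np.bmat calls implement one-pixel shifts of the flattened 28x28 image, and the range(27, 783, 28) loop zeroes the columns that would wrap around a row edge. A minimal sketch of the same trick on a 4x4 image (hypothetical sizes, for illustration only):

import numpy as np

w = np.arange(2 * 16, dtype=float).reshape(2, 16)   # 2 units, 4x4 inputs
w_right = np.bmat([w[:, 1:], np.zeros((2, 1))]).A   # neighbor one pixel to the right
for i in range(3, 15, 4):                           # zero the wrap-around columns
    w_right[:, i] = 0.0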
Example #8
def admira(r, b, m, n, iter, A, A_star):
	if 2*r > min(m,n):
		r_prime = min(m,n)
	else:
		r_prime = 2*r

	# initialization
	X_hat = np.random.randn(m,n) # step 1
	Psi_hatU = np.matrix([])
	Psi_hatV = np.matrix([])
	for i in range(iter):
		Y = A_star(b - A(X_hat))
		(U, s, Vt) = svd(Y)
		Psi_primeU = U[:, 0:r_prime]
		Psi_primeV = Vt.T[:, 0:r_prime]
		if i > 0:
			Psi_tildeU = np.bmat([Psi_primeU, Psi_hatU])
			Psi_tildeV = np.bmat([Psi_primeV, Psi_hatV])
		else:
			Psi_tildeU = Psi_primeU
			Psi_tildeV = Psi_primeV
		AP = lambda b: APsiUV(b, A, Psi_tildeU, Psi_tildeV)
		APt = lambda s: APsitUV(s, A_star, Psi_tildeU, Psi_tildeV)
		ALS = lambda b: APt(AP(b))
		# renamed from `iter` so the loop-bound argument isn't rebound mid-loop
		(s, res, cg_iters) = cgsolve(ALS, APt(b), 1e-6, 100, False)
		X_tilde = Psi_tildeU*np.matrix(np.diag(np.array(s).reshape(-1)))*Psi_tildeV.T
		(U, s, Vt) = svd(X_tilde)
		Psi_hatU = U[:, 0:r]
		Psi_hatV = Vt.T[:, 0:r]
		X_hat = Psi_hatU*np.diag(s[0:r])*Psi_hatV.T

	return X_hat
Example #9
def tf2ss(tf):
    #assert isinstance(tf, TF), "tf2ss() requires a transfer function"
    Ts = tf.Ts
    
    # Use observable canonical form
    n = len(tf.denominator) - 1
    a0 = tf.denominator[0]
    b0 = tf.numerator[0]
    num = [numerator/a0 for numerator in tf.numerator][1:] # chop off b0
    den = [denominator/a0 for denominator in tf.denominator][1:] # chop off a0
    
    aCol = transpose(mat([-a for a in den])) 
    bCol = []
    for i in range(0, n):
        bCol.append(num[i] - den[i]*b0)
        
    if n == 1:
        A = aCol
        C = 1
    else:
        A = bmat([[aCol, bmat([[eye(n-1)], [zeros((1, n-1))]])]])
        C = bmat([[mat([[1.0]]), zeros((1, n-1))]])
    B = transpose(mat(bCol))
    D = b0
    return StateSpace(A, B, C, D, Ts)
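
The TF and StateSpace types are not shown; a minimal sketch with stand-in containers (an assumption, not the project's actual classes, and assuming the module's numpy names mat, bmat, eye, zeros and transpose are in scope):

from collections import namedtuple

TF = namedtuple('TF', 'numerator denominator Ts')
StateSpace = namedtuple('StateSpace', 'A B C D Ts')

# y[k+1] = 0.5*y[k] + u[k]: expect A = [[0.5]], B = [[1.0]], C = 1, D = 0.0
ss = tf2ss(TF(numerator=[0.0, 1.0], denominator=[1.0, -0.5], Ts=0.1))
print(ss.A, ss.B, ss.C, ss.D)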
    
Example #10
def doPhysics(rbd, force, torque, dtime):
    globalcom = rbd.rotmat.dot(rbd.com)+rbd.pos
    globalinertiatensor = rbd.rotmat.dot(rbd.inertiatensor).dot(rbd.rotmat.transpose())
    globalcom_hat = rm.hat(globalcom)
    # si = spatial inertia
    Isi00 = rbd.mass * np.eye(3)
    Isi01 = rbd.mass * globalcom_hat.transpose()
    Isi10 = rbd.mass * globalcom_hat
    Isi11 = rbd.mass * globalcom_hat.dot(globalcom_hat.transpose()) + globalinertiatensor
    Isi = np.bmat([[Isi00, Isi01], [Isi10, Isi11]])
    vw = np.bmat([rbd.linearv, rbd.angularw]).T
    pl = Isi*vw
    # print np.ravel(pl[0:3])
    # print np.ravel(pl[3:6])
    ft = np.bmat([force, torque]).T
    angularw_hat = rm.hat(rbd.angularw)
    linearv_hat = rm.hat(rbd.linearv)
    vwhat_mat = np.bmat([[angularw_hat, np.zeros((3,3))], [linearv_hat, angularw_hat]])
    dvw = Isi.I*(ft-vwhat_mat*Isi*vw)
    # print dvw
    rbd.dlinearv = np.ravel(dvw[0:3])
    rbd.dangularw = np.ravel(dvw[3:6])

    rbd.linearv = rbd.linearv + rbd.dlinearv * dtime
    rbd.angularw = rbd.angularw + rbd.dangularw * dtime

    return [np.ravel(pl[0:3]), np.ravel(pl[3:6])]
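
rm.hat is not defined in this snippet; presumably it is the usual skew-symmetric (hat) operator, sketched here as an assumption:

import numpy as np

def hat(v):
    # skew-symmetric matrix such that hat(v).dot(u) == np.cross(v, u)
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])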
Example #11
 def generateInitData(self,outputfile):
     #initdata = {}
     num_sectors = len(self._num_factor_sectors)
     num_factors = self._num_factor_region + np.array(self._num_factor_sectors).sum()
     num_rv = self._boundary[-1]
     initBeta_all = (np.random.rand(num_rv,self._num_factor_region)-0.5) * 2
     initBeta_sectors = [(np.random.rand(self._boundary[i+1]-self._boundary[i], self._num_factor_sectors[i]) - 0.5)*2  for i in np.arange(num_sectors)]
     if len(self._num_factor_sectors)>0:
         top = np.zeros((self._boundary[0], self._num_factor_sectors[0]))
         bottom = np.zeros((self._boundary[-1]-self._boundary[1], self._num_factor_sectors[0]))
         temp = np.bmat([[top], [initBeta_sectors[0]], [bottom]])
         initBeta_all = np.bmat([initBeta_all, temp])
         for i in np.arange(1, num_sectors):
             top = np.zeros((self._boundary[i], self._num_factor_sectors[i]))
             bottom = np.zeros((self._boundary[-1]-self._boundary[1+i], self._num_factor_sectors[i]))
             temp = np.bmat([[top], [initBeta_sectors[i]], [bottom]])
             initBeta_all = np.bmat([initBeta_all, temp])
     norm = np.sqrt( np.diag(initBeta_all.dot(initBeta_all.T)))
     norm = norm.reshape(num_rv,1)
     initBeta_all = initBeta_all/norm
     initBeta_all = np.array( initBeta_all) * np.sqrt(np.random.rand(num_rv,1))
     #outputfile = 'C:\\rk\\SFM\\input\\output_N50_r2_s23222_initdata.xisx'
     writer = pd.ExcelWriter(outputfile)
     self._initdata[self._beta_region] = pd.DataFrame(data = initBeta_all[:,0:self._num_factor_region])
     self._initdata[self._beta_region].to_excel(writer, sheet_name=self._beta_region)
     for i in np.arange(len(self._num_factor_sectors)):
         self._initdata[self._beta_s + str(i)] = pd.DataFrame(data = initBeta_all[self._boundary[i]:self._boundary[i+1], \
             int(np.array(self._num_factor_sectors[0:i]).sum()) \
             + self._num_factor_region : int(np.array(self._num_factor_sectors[0: i+1]).sum()) + self._num_factor_region])
         self._initdata[self._beta_s + str(i)].to_excel(writer, sheet_name =self._beta_s + str(i))
     writer.save()
Example #12
    def add_new_data_point(self, x, y):
        """
        Add a new function observation to the GP.

        Parameters
        ----------
        x: 2d-array
        y: 2d-array
        """
        x = np.atleast_2d(x)
        y = np.atleast_2d(y)
        if self.gp is None:
            # Initialize GP
            # inference_method = GPy.inference.latent_function_inference.\
            #     exact_gaussian_inference.ExactGaussianInference()
            self.gp = GPy.core.GP(X=x, Y=y, kernel=self.kernel,
                                  # inference_method=inference_method,
                                  likelihood=self.likelihood)
        else:
            # Add data to GP
            # self.gp.set_XY(np.vstack([self.gp.X, x]),
            #                np.vstack([self.gp.Y, y]))

            # Add data row/col to kernel (a, b)
            # [ K    a ]
            # [ a.T  b ]
            #
            # Now K = L.dot(L.T)
            # The new Cholesky decomposition is then
            # L_new = [ L    0 ]
            #         [ c.T  d ]
            a = self.gp.kern.K(self.gp.X, x)
            b = self.gp.kern.K(x, x)

            b += 1e-8 + self.gp.likelihood.gaussian_variance(
                    self.gp.Y_metadata)

            L = self.gp.posterior.woodbury_chol
            c = sp.linalg.solve_triangular(self.gp.posterior.woodbury_chol, a,
                                           lower=True)

            d = np.sqrt(b - c.T.dot(c))

            L_new = np.asfortranarray(
                    np.bmat([[L, np.zeros_like(c)],
                             [c.T, d]]))

            K_new = np.bmat([[self.gp.posterior._K, a],
                             [a.T, b]])

            self.gp.X = np.vstack((self.gp.X, x))
            self.gp.Y = np.vstack((self.gp.Y, y))

            alpha, _ = dpotrs(L_new, self.gp.Y, lower=1)
            self.gp.posterior = Posterior(woodbury_chol=L_new,
                                          woodbury_vector=alpha,
                                          K=K_new)
        # Increment time step
        self.t += 1
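Example #13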
def combine2(networks):
    """Combines several networks, treating the output layers as independent.
    """
    sizes = [networks[0].sizes[0],
             sum(net.sizes[1] for net in networks),
             sum(net.sizes[-1] for net in networks)]
    combined = Network(sizes, cost=CrossEntropyCost)
    combined.weights = [np.bmat([[net.weights[0]] for net in networks]).A,
                        sc.block_diag(*[net.weights[1] for net in networks])]
    combined.biases = [np.bmat([[net.biases[0]] for net in networks]).A,
                       np.bmat([[net.biases[1]] for net in networks]).A]
    return combined
    
Example #14
 def DilateArray(ar, scale):
     if ar.shape[1]==1:
         return np.bmat([[ar]]*scale)
     else:
         background = np.array([[np.zeros(ar.shape)]*scale]*scale, dtype=ar.dtype)
         for i in range(scale):
             background[i,i] = ar
         return np.array(np.bmat(background.tolist()), dtype=np.int32)
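
A minimal usage sketch: with scale=2, a 2x2 array is placed twice along the block diagonal (single-column inputs are instead tiled vertically):

import numpy as np

print(DilateArray(np.array([[1, 2], [3, 4]]), 2))
# [[1 2 0 0]
#  [3 4 0 0]
#  [0 0 1 2]
#  [0 0 3 4]]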
Example #15
def compute_sources_and_receivers(distance_data, dim):
    # number of sources and receivers
    M,N = distance_data.shape

    # construct D matrix
    D = distance_data**2

    # reconstruct S and R matrix up to a transformation
    U,si,V_h = np.linalg.svd(D)
    R_hat = np.mat(U[:,:dim].T)
    S_hat = np.mat(np.eye(dim)*si[:dim]) * np.mat(V_h[:dim,:])

    hr = np.ones((1,N)) * np.linalg.pinv(S_hat)
    I = np.eye(4)
    zeros = np.zeros((4,1))
    Hr = np.bmat('hr; zeros I')
    R_hatHr = (R_hat.T * np.linalg.inv(Hr)).H

    hs = np.linalg.pinv(R_hatHr).H * np.ones((M,1))
    zeros = np.zeros((1,4))
    Hs = np.bmat('I; zeros')
    Hs = np.linalg.inv(np.bmat('Hs hs'))

    S_prime = Hs*Hr*S_hat

    A = np.array(S_prime[4,:])
    XYZ = np.array(S_prime[1:4,:])
    X = np.array(S_prime[1,:])
    Y = np.array(S_prime[2,:])
    Z = np.array(S_prime[3,:])

    qq = np.vstack( (np.ones((1,N)), 2*XYZ, XYZ**2, 2*X*Y, 
        2*X*Z, 2*Y*Z) ).T
    q = np.linalg.pinv(qq).dot(A.T)
    Q = np.vstack( (np.hstack( (np.squeeze(q[:4].T), -0.5) ), 
        np.hstack([q[1], q[4], q[7], q[8], 0]), 
        np.hstack([q[2], q[7], q[5], q[9], 0]), 
        np.hstack([q[3],q[8],q[9],q[6],0]), 
        np.array([-0.5,0,0,0,0]) ) )

    if np.all(np.linalg.eigvals(Q[1:4,1:4]) > 0):
        C = np.linalg.cholesky(Q[1:4,1:4]).T
    else:
        C = np.eye(3)

    Hq = np.vstack((  np.array([1,0,0,0,0]),
                      np.hstack( (np.zeros((3,1)), C, np.zeros((3,1)))),
                      np.hstack( (-q[0], -2*np.squeeze(q[1:4].T), 1))
                    ))

    H = np.mat(Hq) * Hs * Hr
    Se = (H*S_hat)[1:4,:]
    Re = 0.5 * (np.linalg.inv(H).H*R_hat)[1:4,:]

    return Re, Se
Example #16
def FindEAndD(fundamental):
    U,S,Vh = numpy.linalg.svd(fundamental)
    V = Vh.T
    # right epipole: right singular vector for the smallest singular value
    e0 = V[:,2] / V[2,2]

    # direction perpendicular to the epipole in the image plane
    a = -e0[1]
    b = e0[0]
    c = numpy.mat("0")
    d0 = numpy.bmat('a; b; c')

    # left epipole
    e1 = U[:3,2] / U[2,2]

    Fd0 = fundamental * d0
    Fd0[2] = Fd0[2]**2
    Fd0 = Fd0 / math.sqrt(Fd0.sum())
    a = -Fd0[1]
    b = Fd0[0]
    c = numpy.mat("0")
    d1 = numpy.bmat('a;b;c')

    return e0, d0, e1, d1
Example #17
def construct_A_matrix(n_gates, filt):
    """
    Construct a row-augmented A matrix. Equation 5 in Giangrande et al, 2012.

    A is a block matrix given by:

    .. math::

        \\bf{A} = \\begin{bmatrix} \\bf{I} & \\bf{-I} \\\\\\\\
                  \\bf{-I} & \\bf{I} \\\\\\\\ \\bf{Z}
                  & \\bf{M} \\end{bmatrix}

    where
        :math:`\\bf{I}` is the identity matrix
        :math:`\\bf{Z}` is a matrix of zeros
        :math:`\\bf{M}` contains our differential constraints.

    The identity blocks are n_gates by n_gates, while :math:`\\bf{Z}` and
    :math:`\\bf{M}` have n_gates - filter_length + 1 rows, so
    shape(:math:`\\bf{A}`) is approximately (3 * n, 2 * n).

    Note that :math:`\\bf{M}` contains some side padding to deal with edge
    issues.

    Parameters
    ----------
    n_gates : int
        Number of gates, determines size of identity matrix
    filt : array
        Input filter.

    Returns
    -------
    a : matrix
        Row-augmented A matrix.

    """
    Identity = np.eye(n_gates)
    filter_length = len(filt)
    M_matrix_middle = np.diag(np.ones(n_gates - filter_length + 1), k=0) * 0.0
    posn = np.linspace(-1.0 * (filter_length - 1) / 2, (filter_length - 1)/2,
                       filter_length)
    for diag in range(filter_length):
        M_matrix_middle = M_matrix_middle + np.diag(np.ones(
            int(n_gates - filter_length + 1 - np.abs(posn[diag]))),
            k=int(posn[diag])) * filt[diag]
    side_pad = (filter_length - 1) // 2
    M_matrix = np.bmat(
        [np.zeros([n_gates-filter_length + 1, side_pad], dtype=float),
         M_matrix_middle, np.zeros(
             [n_gates-filter_length+1, side_pad], dtype=float)])
    Z_matrix = np.zeros([n_gates - filter_length + 1, n_gates])
    return np.bmat([[Identity, -1.0 * Identity], [Identity, Identity],
                   [Z_matrix, M_matrix]])
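
A quick shape check (the filter here is an arbitrary 3-point smoother, an assumption for illustration):

import numpy as np

A = construct_A_matrix(10, np.ones(3) / 3.0)
print(A.shape)   # (28, 20): two 10x10 identity block rows plus 8 constraint rows
Example #18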
    def train(self):

        if (self.status != 'init'):
            print("Please load train data and init W first.")
            return self.W

        self.status = 'train'

        original_X = self.train_X[:, 1:]

        K = utility.Kernel.kernel_matrix(self, original_X)

        # P = Q, q = p, G = -A, h = -c

        P = cvxopt.matrix(np.bmat([[K, -K], [-K, K]]))
        q = cvxopt.matrix(np.bmat([self.epsilon - self.train_Y, self.epsilon + self.train_Y]).reshape((-1, 1)))
        G = cvxopt.matrix(np.bmat([[-np.eye(2 * self.data_num)], [np.eye(2 * self.data_num)]]))
        h = cvxopt.matrix(np.bmat([[np.zeros((2 * self.data_num, 1))], [self.C * np.ones((2 * self.data_num, 1))]]))
        # A = cvxopt.matrix(np.append(np.ones(self.data_num), -1 * np.ones(self.data_num)), (1, 2*self.data_num))
        # b = cvxopt.matrix(0.0)
        cvxopt.solvers.options['show_progress'] = False
        solution = cvxopt.solvers.qp(P, q, G, h)

        # Lagrange multipliers
        alpha = np.array(solution['x']).reshape((2, -1))
        self.alpha_upper = alpha[0]
        self.alpha_lower = alpha[1]
        self.beta = self.alpha_upper - self.alpha_lower

        sv = abs(self.beta) > 1e-5
        self.sv_index = np.arange(len(self.beta))[sv]
        self.sv_beta = self.beta[sv]
        self.sv_X = original_X[sv]
        self.sv_Y = self.train_Y[sv]

        free_sv_upper = np.logical_and(self.alpha_upper > 1e-5, self.alpha_upper < self.C)
        self.free_sv_index_upper = np.arange(len(self.alpha_upper))[free_sv_upper]
        self.free_sv_alpha_upper = self.alpha_upper[free_sv_upper]
        self.free_sv_X_upper = original_X[free_sv_upper]
        self.free_sv_Y_upper = self.train_Y[free_sv_upper]

        free_sv_lower = np.logical_and(self.alpha_lower > 1e-5, self.alpha_lower < self.C)
        self.free_sv_index_lower = np.arange(len(self.alpha_lower))[free_sv_lower]
        self.free_sv_alpha_lower = self.alpha_lower[free_sv_lower]
        self.free_sv_X_lower = original_X[free_sv_lower]
        self.free_sv_Y_lower = self.train_Y[free_sv_lower]

        short_b_upper = self.free_sv_Y_upper[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_upper[0], self.sv_X)) - self.epsilon
        short_b_lower = self.free_sv_Y_lower[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_lower[0], self.sv_X)) + self.epsilon

        self.sv_avg_b = (short_b_upper + short_b_lower) / 2

        return self.W
Example #19
 def _batch_incremental_pca(x, G, S):
   r = G.shape[1]
   # number of new samples (columns of x); using x.shape[0] would break the
   # block shapes below
   b = x.shape[1]
   
   xh = G.T.dot(x)
   H  = x - G.dot(xh)
   # economic QR: J is (n, b) and W is (b, b), matching the block matrix Q
   J, W = scipy.linalg.qr(H, overwrite_a=True, mode='economic', check_finite=False)
   
   Q = np.bmat( [[np.diag(S), xh], [np.zeros((b,r), dtype=np.float32), W]] )
   
   G_new, St_new, Vtoss = scipy.linalg.svd(Q, full_matrices=False, check_finite=False)
   St_new=St_new[:r]
   G_new= np.asarray(np.bmat([G, J]).dot( G_new[:,:r] ))
   
   return G_new, St_new
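
A minimal driving sketch (assumptions: G holds the current orthonormal basis with features in rows, S the singular values, and x a new batch of sample columns):

import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
G, S, _ = scipy.linalg.svd(rng.randn(50, 10), full_matrices=False)
G, S = G[:, :5], S[:5]                       # keep a rank-5 model
G, S = _batch_incremental_pca(rng.randn(50, 4), G, S)
print(G.shape, S.shape)                      # (50, 5) (5,)
Example #20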
def ratNormScroll(degList,dataType):
    # return the matrix for the RNS w/ degrees in degList
    blocks = []
    numBlocks = len(degList)
    maxVal = max(degList[:-1]+[degList[-1]-1]) + 1
    for i in range(len(degList)):
        thisBlock = np.zeros((numBlocks,degList[i]+1),dtype=dataType)
        # ith row = ones
        for j in range(degList[i]+1):
            thisBlock[i][j]=1
            thisBlock[numBlocks-1][j] = j
        blocks.append(thisBlock)
    toReturn = np.bmat(blocks)
    lastRow = maxVal - np.sum(toReturn,axis=0)
    return np.bmat([[toReturn],[lastRow]])
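
A minimal usage sketch; every column of the result sums to maxVal:

print(ratNormScroll([2, 3], int))
# [[1 1 1 0 0 0 0]
#  [0 1 2 0 1 2 3]
#  [2 1 0 3 2 1 0]]
Example #21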
    def test_falker(self):
        """Test matrices giving some Nan generalized eigen values."""
        M = diag(array(([1,0,3])))
        K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
        D = array(([1,-1,0],[-1,1,0],[0,0,0]))
        Z = zeros((3,3))
        I = identity(3)
        A = bmat([[I,Z],[Z,-K]])
        B = bmat([[Z,I],[M,D]])

        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)
Example #22
def crankNicolson2D(condInitialesPhi, condInitialesPsi, condSpatiales = None, tMax = 0.001, dt=10**-6, v = 100, dx = 1, intervalSauvegarde=1):
	if not np.array_equal(np.shape(condInitialesPhi), np.shape(condInitialesPsi)):
		raise Exception("La taille de condInitialesPhi doit être similaire à la taille condInitialesPsi.")


	# Useful constants
	n = np.shape(condInitialesPhi)  #(ny,nx)

	if n[0] != n[1] :
		raise Exception("Les dimensions x et y doivent être similaires.")

	n = n[0]
	k = -dt * v**2 / dx**2 / 2
	N = int(tMax / dt)

	# Matrix holding the system's evolution
	evolution = np.zeros((int(N/intervalSauvegarde)+1,2*n*n))
	evolution[0,:n*n] = condInitialesPhi.flatten()
	evolution[0,n*n:] = condInitialesPsi.flatten()

	phi = evolution[0]

	I = np.eye(n*n)
	A = np.tri(n*n, k = 1).T * np.tri(n*n, k=-1)
	A = (A + A.T - 4 * np.eye(n*n))*k
	B = np.eye((n-1)*n)*k
	A[:-n,n:] = A[:-n,n:] + B
	A[n:,:-n] = A[n:,:-n] + B

	M = np.array(np.bmat( ((I, -dt*I/2), (A, I))))
	K = np.array(np.bmat( ((I, dt*I/2), (-A, I))))

	invM = np.linalg.inv(M)
	matriceEvolution = np.dot(invM,K)	

	# Apply the spatial conditions: the listed points are held fixed in time.
	if condSpatiales is not None :
		idx = np.array(condSpatiales[0]+n*condSpatiales[1],"int")
		matriceEvolution[idx] = np.zeros(2*n*n)
		matriceEvolution[idx, idx] = 1
		matriceEvolution[idx+n*n] = np.zeros(2*n*n)

	for i in range(1,N+1):
		phi = np.dot(matriceEvolution,phi)
		if i % intervalSauvegarde == 0:
			evolution[int(i // intervalSauvegarde)] = phi

	return evolution[:,:n*n], evolution[:,n*n:]
Example #23
def pad(mat, padrow, padcol):
    """Add additional rows/columns to `mat`. The new rows/columns will be initialized with zeros.

    Parameters
    ----------
    mat : numpy.ndarray
        Input 2D matrix
    padrow : int
        Number of additional rows
    padcol : int
        Number of additional columns

    Returns
    -------
    numpy.matrixlib.defmatrix.matrix
        Matrix with needed padding.

    """
    if padrow < 0:
        padrow = 0
    if padcol < 0:
        padcol = 0
    rows, cols = mat.shape
    return np.bmat([
        [mat, np.matrix(np.zeros((rows, padcol)))],
        [np.matrix(np.zeros((padrow, cols + padcol)))],
    ])
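
A minimal usage sketch (the zero padding is float, so the integer input is upcast):

import numpy as np

m = np.matrix([[1, 2], [3, 4]])
print(pad(m, 1, 2))
# [[1. 2. 0. 0.]
#  [3. 4. 0. 0.]
#  [0. 0. 0. 0.]]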
Example #24
 def tdhf(cls,hfwavefunction,hamiltonian):
     occ = hfwavefunction.occ
     Nelec = occ['alpha'] + occ['beta']
     dim = hamiltonian.dim
     C = hfwavefunction.coefficient
     Nov = Nelec*(2*dim - Nelec)
     #Transfer Fock integral from spatial to spin basis
     fs = spinfock(hfwavefunction.eorbitals)
     #Transfer electron repulsion integral from atomic basis
     #to molecular basis
     hamiltonian.operators['electron_repulsion'].basis_transformation(C)
     #build double bar integral <ij||kl>
     spinints = hamiltonian.operators['electron_repulsion'].double_bar
     
     A = np.zeros((Nov,Nov))
     B = np.zeros((Nov,Nov))
     I = -1
     for i in range(0,Nelec):
         for a in range(Nelec,dim*2):
             I += 1
             J = -1
             for j in range(0,Nelec):
                 for b in range(Nelec,dim*2):
                     J += 1
                     A[I,J] = (fs[a,a] - fs[i,i])*(i==j)*(a==b)+spinints[a,j,i,b]
                     B[I,J] = spinints[a,b,i,j]
     M = np.bmat([[A,B],[-B,-A]])
     ETD,CTD = np.linalg.eig(M)
     print(ETD)
Example #25
def InterfaceOblique(na, nb, n, k1):
    ig = _InterfaceGeometry(na, nb, n, k1)
#     theta_a, theta_b = intgeo.theta_a, intgeo.theta_b
#     area_compensation_a, area_compensation_b = intgeo[1]
#     P, S = intgeo[2]
#     R12, R13, R21, R31, R34, R43, R24, R42 = intgeo[3]
#     k2, k3, k4 = intgeo[4]
    #
    rpa, tpa, rsa, tsa = geo.FresnelOblique(na, nb, ig.theta_a)
    rpb, tpb, rsb, tsb = geo.FresnelOblique(nb, na, ig.theta_b)
    tpa *= ig.area_compensation_a
    tsa *= ig.area_compensation_a
    tpb *= ig.area_compensation_b
    tsb *= ig.area_compensation_b
    ra = rpa * ig.P + rsa * ig.S
    rb = rpb * ig.P + rsb * ig.S
    ta = tpa * ig.P + tsa * ig.S
    tb = tpb * ig.P + tsb * ig.S
    S12 = ig.R12.dot(ra)
    S13 = ig.R13.dot(tb)
    S21 = ig.R21.dot(ra)
    S24 = ig.R24.dot(tb)
    S31 = ig.R31.dot(ta)
    S34 = ig.R34.dot(rb)
    S42 = ig.R42.dot(ta)
    S43 = ig.R43.dot(rb)
    ZZZ = np.zeros((3, 3), dtype=complex)
    S = np.array(np.bmat([[ZZZ, S12, S13, ZZZ],
                          [S21, ZZZ, ZZZ, S24],
                          [S31, ZZZ, ZZZ, S34],
                          [ZZZ, S42, S43, ZZZ]]))

    return S, ig.k2, ig.k3, ig.k4
Example #26
def _create_feature_glyph(feature, vbs):
    r"""
    Create glyph of feature pixels.

    Parameters
    ----------
    feature : (N, D) ndarray
        The feature pixels to use.
    vbs: int
        Defines the size of each block with vectors of the glyph image.
    """
    # vbs = Vector block size
    num_bins = feature.shape[2]
    # construct a "glyph" for each orientation
    block_image_temp = np.zeros((vbs, vbs))
    # Create a vertical line of ones, to be the first vector
    block_image_temp[:, round(vbs / 2) - 1:round(vbs / 2) + 1] = 1
    block_im = np.zeros((block_image_temp.shape[0],
                         block_image_temp.shape[1],
                         num_bins))
    # First vector as calculated above
    block_im[:, :, 0] = block_image_temp
    # Number of bins rotations to create an 'asterisk' shape
    for i in range(1, num_bins):
        block_im[:, :, i] = imrotate(block_image_temp, -i * vbs)

    # make pictures of positive feature_data by adding up weighted glyphs
    feature[feature < 0] = 0
    glyph_im = np.sum(block_im[None, None, :, :, :] *
                      feature[:, :, None, None, :], axis=-1)
    glyph_im = np.bmat(glyph_im.tolist())
    return glyph_im
Example #27
def _compute_initial_position_alt_2(A, a, B, b):
    """
    This is an alternative implementation to _compute_initial_position
    using Rainer's line of thought.
    """
    rows = cols = (0,1,2,3,6)
    AL = A[rows,:][:,cols]
    BL = B[rows,:][:,cols]
    AR = np.array([
        [ 0,  0, 0, 0],
        [-1,  0, 0, 0],
        [ 0,  0, 0, 0],
        [ 0, -1, 0, 0],
        [ 0,  0, 0, 0],
    ])
    BR = np.array([
        [0, 0,  0,  0],
        [0, 0, -1,  0],
        [0, 0,  0,  0],
        [0, 0,  0, -1],
        [0, 0,  0,  0],
    ])
    M = np.bmat([[AL, AR],
                 [BL, BR]])
    m = np.array([a[0], 0, a[1], 0, 1, b[0], 0, b[1], 0, 1])
    return np.linalg.lstsq(M, m, rcond=None)[0][:4]
Example #28
def test_to_matrix():
    np.random.seed(0)
    A = np.random.randn(2, 2)
    B = np.random.randn(3, 3)
    C = np.random.randn(3, 3)

    X = np.bmat([[np.eye(2) + A, np.zeros((2, 3))], [np.zeros((3, 2)), B.dot(C.T)]])

    C = sps.csc_matrix(C)

    Aop = NumpyMatrixOperator(A)
    Bop = NumpyMatrixOperator(B)
    Cop = NumpyMatrixOperator(C)

    Xop = BlockDiagonalOperator([LincombOperator([IdentityOperator(NumpyVectorSpace(2)), Aop],
                                                 [1, 1]), Concatenation(Bop, AdjointOperator(Cop))])

    assert np.allclose(X, to_matrix(Xop))
    assert np.allclose(X, to_matrix(Xop, format='csr').toarray())

    np.random.seed(0)
    V = np.random.randn(10, 2)
    Vva = NumpyVectorSpace.make_array(V.T)
    Vop = VectorArrayOperator(Vva)
    assert np.allclose(V, to_matrix(Vop))
    Vop = VectorArrayOperator(Vva, transposed=True)
    assert np.allclose(V, to_matrix(Vop).T)
Example #29
    def __init__(self, name, parameters, **kwargs):
        self.name = name
        self.setdefaults(parameters)

        if 'logger' in kwargs:
            self.logger = kwargs.get('logger')
        else:
            self.logger = logging.getLogger('BGModel')

        # Initialization of the BG models
        # Mainly a table of FG/BG-ratio-dependent time constants and their
        # effect on leaky integration

        self.FBR_values = np.arange(self.FBR_RANGE[0],self.FBR_RANGE[1], 0.1)

        # Correction flat between -self.noiseSTD < FBR < +self.noiseSTD
        # idx of first element in condition value > -self.noiseSTD. [0][0] returns the index from the tuple.
        indLow = [(idx,val) for idx,val in enumerate(self.FBR_values) if val > -self.noiseSTD][0][0]
        # idx of first element in condition value > self.noiseSTD. [0][0] returns the index from the tuple.
        indHigh= [(idx,val) for idx,val in enumerate(self.FBR_values) if val > self.noiseSTD][0][0]

        col1 = np.linspace(self.FBR_SCOPE[0],0, indLow)
        col2 = np.linspace(0,0,indHigh-indLow)
        col3 = np.linspace(0,self.FBR_SCOPE[1],len(self.FBR_values)-indHigh)

        #bmat notation is confusing, this will just be a (1, X) vector
        self.FBR_cor = np.power(10,(0.1*np.bmat('col1 col2 col3')))

        # The effective loss results from an adaptation of
        # tc-effective = tc*FBR_cor

        self.loss = np.power(math.e, (-self.delta_time/(np.transpose(self.FBR_cor) * self.tau)))  # Note: matrix multiplication; result is numel(tau) x numel(FBR_SCOPE)
Example #30
File: gw.py Project: berquist/pyscf
def rpa(gw, using_tda=False, using_casida=True, method='TDH'):
    r'''Get the RPA eigenvalues and eigenvectors.

    Q^\dagger = \sum_{ia} X_{ia} a^+ i - Y_{ia} i^+ a
    Leads to the RPA eigenvalue equations:
      [ A  B ][X] = omega [ 1  0 ][X]
      [ B  A ][Y]         [ 0 -1 ][Y]
    which is equivalent to
      [ A  B ][X] = omega [ 1  0 ][X]
      [-B -A ][Y] =       [ 0  1 ][Y]
    
    See, e.g. Stratmann, Scuseria, and Frisch, 
              J. Chem. Phys., 109, 8218 (1998)
    '''
    A, B = rpa_AB_matrices(gw, method=method)

    if using_tda:
        ham_rpa = A
        e, x = eig(ham_rpa)
        return e, x
    else:
        if not using_casida:
            ham_rpa = np.array(np.bmat([[A,B],[-B,-A]]))
            assert is_positive_def(ham_rpa)
            e, xy = eig_asymm(ham_rpa)
            return e, xy
        else:
            assert is_positive_def(A-B)
            sqrt_A_minus_B = scipy.linalg.sqrtm(A-B)
            ham_rpa = np.dot(sqrt_A_minus_B, np.dot((A+B),sqrt_A_minus_B))
            esq, t = eig(ham_rpa)
            return np.sqrt(esq), t
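Example #31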
import torch
import numpy as np
import numpy.testing as npt

from torch.autograd import Variable
import torch_gbds.lib.sym_blk_tridiag_inv as sym

# Build a block tridiagonal matrix
prec = np.float32
npA = np.mat('1 6; 6 4', dtype=prec)
npB = np.mat('2 7; 7 4', dtype=prec)
npC = np.mat('3 9; 9 1', dtype=prec)
npD = np.mat('7 2; 9 3', dtype=prec)
npZ = np.mat('0 0; 0 0', dtype=prec)

# a block tridiagonal matrix: a 4x4 grid of 2x2 blocks (8x8 overall)
fullmat = np.bmat([[npA, npB, npZ, npZ], [npB.T, npC, npD, npZ],
                   [npZ, npD.T, npC, npB], [npZ, npZ, npB.T, npC]])

alist = [npA, npC, npC, npC]
blist = [npB, npD, npB]
AAin = Variable(torch.Tensor(np.array(alist)))
BBin = Variable(torch.Tensor(np.array(blist)))


def test_compute_sym_blk_tridiag():
    D, OD, S = sym.compute_sym_blk_tridiag(AAin, BBin)

    invmat = np.linalg.inv(fullmat)

    benchD = [
        invmat[i:(i + 2), i:(i + 2)] for i in range(0, invmat.shape[0], 2)
    ]
Example #32
         for j in range(np.shape(Sig)[0])]
rho = -np.log(nfact * np.array(Sis) / np.size(Nmat[1]) +
              1e-17)  # adding machine precision value to prevent infs later

#building the least squares matrix
ff = np.diag(np.sum(Sig, axis=0))
fA = Sig
fV = Sig * ns
AA = np.diag(np.sum(Sig, axis=1))
AV = np.diag(np.sum(ns * Sig, axis=1))
VV = np.diag(np.sum(nss * Sig, axis=1))
Af = fA.T
Vf = fV.T
VA = AV.T

L = np.bmat([[ff, Af, Vf], [fA, AA, VA], [fV, AV, VV]])

#building the solution vector
bf = np.diag(np.dot(rho.T, Sig))
bA = np.diag(np.dot(rho, Sig.T))
bV = np.diag(np.dot(rho, (ns * Sig).T))

B = np.bmat([[bf], [bA], [bV]]).T

Sol = la.lstsq(L, B)
F = np.ravel(Sol[0][0:np.shape(ff)[1]]
             )  # F corresponds to the first parameters that come from the fit

#print "\n Frustration is given by ",F

#printing Frustrations to file
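Example #33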
    def update(self, measurement, measurementCovariance, new):
        global seenLandmarks_
        global dimR_
        # get robot current pose
        currentRobotAbs = self.robot.getPose()

        # get landmark absolute position estimate given current pose and measurement (robot.sense)
        [landmarkAbs, G1,
         G2] = self.robot.inverseSense(currentRobotAbs, measurement)
        # get KF state mean and covariance
        stateMean = self.stateMean
        stateCovariance = self.stateCovariance

        print('###############################')

        # if new landmark augment stateMean and stateCovariance
        if new:
            stateMean = np.concatenate(
                (stateMean, [[landmarkAbs[0]], [landmarkAbs[1]]]), axis=0)
            Prr = self.robot.getCovariance()

            if len(seenLandmarks_) == 1:
                Plx = np.dot(G1, Prr)
            else:
                lastStateCovariance = KalmanFilter.getStateCovariance(self)
                end = lastStateCovariance.shape
                print('end:', end[1])

                # robot-to-map cross covariance
                Prm = lastStateCovariance[0:3, 3:]
                Plx = np.dot(G1, np.bmat([[Prr, Prm]]))
            Pll = np.array(np.dot(np.dot(G1, Prr),
                                  np.transpose(G1))) + np.array(
                                      np.dot(np.dot(G2, measurementCovariance),
                                             np.transpose(G2)))
            P = np.bmat([[stateCovariance, np.transpose(Plx)], [Plx, Pll]])
            stateCovariance = P
            # else: if old landmark, stateMean & stateCovariance remain the same
            # (they will be changed in the update phase by the Kalman gain)
            # calculate expected measurement

        print('state covar : ', stateCovariance.shape)

        [landmarkAbs, Hr, Hl] = Relative2AbsoluteXY(currentRobotAbs,
                                                    measurement)
        print('Label : ', measurement[2])
        print('land z: ', measurement[0])
        print('land th: ', measurement[1])
        print('landmarkAbs: ', landmarkAbs)
        print('robot absolute pose : ', currentRobotAbs)
        # get measurement
        Z = ([[measurement[0]], [measurement[1]]])

        #Update
        x = stateMean
        label = measurement[2]

        # y = Z - expectedMeasurement
        # AKA Innovation Term
        measured = ([[landmarkAbs[0]], [landmarkAbs[1]]])
        y = np.subtract(Z, measured)

        print('________')

        # build H
        # H = [Hr, 0, ..., 0, Hl,  0, ..,0] position of Hl depends on when was the landmark seen? H is C ??
        H = np.reshape(Hr, (2, 3))

        print(' H Start: ', seenLandmarks_.index(label))

        for i in range(0, seenLandmarks_.index(label)):
            H = np.bmat([[H, np.zeros([2, 2])]])
        H = np.bmat([[H, np.reshape(Hl, (2, 2))]])
        for i in range(0,
                       len(seenLandmarks_) - seenLandmarks_.index(label) - 1):
            H = np.bmat([[H, np.zeros([2, 2])]])
        # compute S
        print('G1 : ', G1)
        print('G2 : ', G2)
        print('H : ', H)
        try:
            s1 = np.dot(H, stateCovariance)
        except ValueError:
            print('Value Error S1')
            print('H shape', H.shape)
            print('State Cov', stateCovariance.shape)
            return

        try:
            S = np.add(np.dot(np.dot(H, stateCovariance), np.transpose(H)),
                       measurementCovariance)
        except ValueError:
            print('Value error S')
            return

        if (S < 0.000001).all():
            print('Non-invertible S Matrix')
            raise ValueError

        # calculate Kalman gain
        K = np.array(
            np.dot(np.dot(stateCovariance, np.transpose(H)), np.linalg.inv(S)))


        # compute posterior mean
        posteriorStateMean = np.add(stateMean, np.dot(K, y))

        # compute posterior covariance
        kc = np.array(np.dot(K, H))
        kcShape = len(kc)

        posteriorStateCovariance = np.dot(np.subtract(np.eye(kcShape), kc),
                                          stateCovariance)

        # check theta robot is a valid theta in the range [-pi, pi]
        posteriorStateMean[2][0] = pi2pi(posteriorStateMean[2][0])

        # update robot pose
        robotPose = ([posteriorStateMean[0][0]], [posteriorStateMean[1][0]],
                     [posteriorStateMean[2][0]])
        self.robot.setPose(robotPose)
        # updated robot covariance
        robotCovariance = posteriorStateCovariance[0:3, 0:3]
        self.robot.setCovariance(robotCovariance)
        # set posterior state mean
        KalmanFilter.setStateMean(self, posteriorStateMean)
        # set posterior state covariance
        KalmanFilter.setStateCovariance(self, posteriorStateCovariance)
        vec = mapping(seenLandmarks_.index(label) + 1)
        landmark_abs_[int(label) - 1].append(
            [[stateMean[dimR_ + vec[0] - 1][0]],
             [stateMean[dimR_ + vec[1] - 1][0]]])
        for i in range(0, len(landmark_abs_)):
            print('landmark absolute position : ', i + 1, ',',
                  np.median(landmark_abs_[i], 0))

        print('post mean: ', posteriorStateMean)
        print('post covar: ', posteriorStateCovariance)

        print('____END______')
        return posteriorStateMean, posteriorStateCovariance
Example #34
                        len(Ct[0]) // number_correlations_files].reshape(
                            number_nodes, number_nodes))
Ct0_12 = np.asmatrix(Ct[0, 5 * len(Ct[0]) // number_correlations_files:6 *
                        len(Ct[0]) // number_correlations_files].reshape(
                            number_nodes, number_nodes))
Ct0_20 = np.asmatrix(Ct[0, 6 * len(Ct[0]) // number_correlations_files:7 *
                        len(Ct[0]) // number_correlations_files].reshape(
                            number_nodes, number_nodes))
Ct0_21 = np.asmatrix(Ct[0, 7 * len(Ct[0]) // number_correlations_files:8 *
                        len(Ct[0]) // number_correlations_files].reshape(
                            number_nodes, number_nodes))
Ct0_22 = np.asmatrix(Ct[0, 8 * len(Ct[0]) // number_correlations_files:9 *
                        len(Ct[0]) // number_correlations_files].reshape(
                            number_nodes, number_nodes))
#Create the matrix C(t=0)
Ct0 = np.bmat(([Ct0_00, Ct0_01, Ct0_02], [Ct0_10, Ct0_11,
                                          Ct0_12], [Ct0_20, Ct0_21, Ct0_22]))
Ct0_stat = (Ct0 + Ct0.T) / 2
#Calculate the matrix R as the inverse of the matrix C(t=0)
R = linalg.pinv(Ct0, rcond=1e-12)
#Change the format of R in order to obtain the inverse of C in each time step. Matrix -> Vector
Ctinv0 = R
Ctinv[0, :] = np.bmat([Ctinv0[i * number_nodes:(i + 1) * number_nodes,
                              j * number_nodes:(j + 1) * number_nodes].reshape(1, number_nodes**2)
                       for i in range(3) for j in range(3)])
#Calculate C(t) normalized
Ctnorm0 = Ct0_stat.dot(R)
#Change format: Matrix -> Vector
Ctnorm[0, :] = np.bmat([Ctnorm0[i * number_nodes:(i + 1) * number_nodes,
                                j * number_nodes:(j + 1) * number_nodes].reshape(1, number_nodes**2)
                        for i in range(3) for j in range(3)])
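Example #35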
    def update(self,
               measurement,
               measurementCovariance,
               new,
               currentStateMean=None,
               currentStateCovariance=None,
               currentRobotAbs=None,
               currentRobotCov=None):
        global seenLandmarks_
        global dimR_
        global seenLandmarksX_
        global it
        # get robot current pose
        if currentRobotAbs is None:
            currentRobotAbs = self.robot.getPose()
        if currentRobotCov is None:
            currentRobotCov = self.robot.getCovariance()
        label = measurement[2]
        # get landmark absolute position estimate given current pose and measurement (robot.sense)
        [landmarkAbs, G1,
         G2] = self.robot.inverseSense(currentRobotAbs, measurement)
        # get KF state mean and covariance

        if currentStateMean is None:
            currentStateMean = stateMean = np.array(self.stateMean)
        else:
            stateMean = currentStateMean

        if currentStateCovariance is None:
            currentStateCovariance = stateCovariance = np.array(
                self.stateCovariance)
        else:
            stateCovariance = currentStateCovariance

        print('###############################')

        # if new landmark augment stateMean and stateCovariance
        if new:
            stateMean = np.concatenate(
                (stateMean, [[landmarkAbs[0]], [landmarkAbs[1]]]), axis=0)
            Prr = self.robot.getCovariance()

            if len(seenLandmarks_) == 1:
                Plx = np.dot(G1, Prr)
            else:
                lastStateCovariance = KalmanFilter.getStateCovariance(self)
                Prm = lastStateCovariance[0:3, 3:]
                Plx = np.dot(G1, np.bmat([[Prr, Prm]]))

            Pll = np.array(np.dot(np.dot(G1, Prr),
                                  np.transpose(G1))) + np.array(
                                      np.dot(np.dot(G2, measurementCovariance),
                                             np.transpose(G2)))
            P = np.bmat([[stateCovariance, np.transpose(Plx)], [Plx, Pll]])
            stateCovariance = P

        else:
            # if old landmark stateMean & stateCovariance remain the same (will be changed in the update phase by the kalman gain)
            # calculate expected measurement
            vec = mapping(seenLandmarks_.index(label) + 1)
            expectedMeas = [0, 0]
            expectedMeas[0] = np.around(stateMean[dimR_ + vec[0] - 1][0], 3)
            expectedMeas[1] = np.around(stateMean[dimR_ + vec[1] - 1][0], 3)

            [landmarkRelative, _,
             _] = Absolute2RelativeXY(currentRobotAbs, expectedMeas)
            #Z = ([ [np.around(landmarkAbs[0],3)],[np.around(landmarkAbs[1],3)] ])
            measured = ([
                np.around(landmarkRelative[0][0], 3),
                np.around(landmarkRelative[1][0], 3)
            ])

            # y = Z - expectedMeasurement
            # AKA Innovation Term
            #measured = ([ [np.around(expectedMeas[0],3)],[np.around(expectedMeas[1],3)] ])
            Z = ([np.around(measurement[0], 3), np.around(measurement[1], 3)])

            y = np.array(RelativeLandmarkPositions(Z, measured))

            # build H
            # H = [Hr, 0, ..., 0, Hl,  0, ..,0] position of Hl depends on when was the landmark seen? H is C ??
            H = np.reshape(G1, (2, 3))
            for i in range(0, seenLandmarks_.index(label)):
                H = np.bmat([[H, np.zeros([2, 2])]])
            H = np.bmat([[H, np.reshape(G2, (2, 2))]])
            for i in range(
                    0,
                    len(seenLandmarks_) - seenLandmarks_.index(label) - 1):
                H = np.bmat([[H, np.zeros([2, 2])]])

            measurementCovariance = np.array(measurementCovariance)
            try:
                S = np.array(
                    np.add(np.dot(np.dot(H, stateCovariance), np.transpose(H)),
                           measurementCovariance))
            except ValueError:
                print('Value error S')
                print('H shape', H.shape)
                print('State Cov', stateCovariance.shape)
                print('measurement Cov', measurementCovariance.shape)
                return

            if (S < 0.000001).all():
                print('Non-invertible S Matrix')
                raise ValueError

            # calculate Kalman gain
            K = np.array(
                np.dot(np.dot(stateCovariance, np.transpose(H)),
                       np.linalg.inv(S)))

            # compute posterior mean
            posteriorStateMean = np.array(np.add(stateMean, np.dot(K, y)))

            # compute posterior covariance
            kc = np.array(np.dot(K, H))
            kcShape = len(kc)

            posteriorStateCovariance = np.dot(np.subtract(np.eye(kcShape), kc),
                                              stateCovariance)

            # check theta robot is a valid theta in the range [-pi, pi]
            posteriorStateMean[2][0] = pi2pi(posteriorStateMean[2][0])

            # update robot pose

            robotPose = ([posteriorStateMean[0][0]
                          ], [posteriorStateMean[1][0]],
                         [posteriorStateMean[2][0]])
            robotCovariance = posteriorStateCovariance[0:3, 0:3]

            # updated robot covariance
            if not (np.absolute(posteriorStateMean[0][0]) > 3.5
                    or np.absolute(posteriorStateMean[1][0]) > 3.5):
                stateMean = posteriorStateMean
                stateCovariance = posteriorStateCovariance
                # set robot pose
                self.robot.setPose(robotPose)
                # set robot covariance
                self.robot.setCovariance(robotCovariance)
                print('IM DONE')

        # set posterior state mean
        self.stateMean = stateMean
        # set posterior state covariance
        self.stateCovariance = stateCovariance

        print('Robot Pose:', currentRobotAbs)
        vec = mapping(seenLandmarks_.index(label) + 1)
        landmark_abs_[int(label) - 1].append(
            [[np.around(stateMean[dimR_ + vec[0] - 1][0], 3)],
             [np.around(stateMean[dimR_ + vec[1] - 1][0], 3)]])
        seenLandmarksX_[int(label) - 1].append(
            np.around(stateMean[dimR_ + vec[0] - 1][0], 3))
        for i in range(0, len(landmark_abs_)):
            #count=Counter(seenLandmarksX_[i])
            print('landmark absolute position : ', i + 1, ',',
                  np.median(landmark_abs_[i], 0))  #count.most_common(1)

        print('____END______')
        return stateMean, stateCovariance
Example #36
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.
    
    The original matrix X is not needed at all, so this function implements one-pass
    streaming rank-1 updates to an existing decomposition. 
    
    `a` and `b` are (m, 1) and (n, 1) matrices.
    
    You can set V to None if you're not interested in the right singular
    vectors. In that case, the returned V' will also be None (saves memory).
    
    The blocked merge algorithm in LsiModel.addDocuments() is much faster; I keep
    this function here purely for backup reasons.

    This is the rank-1 update as described in
    **Brand, 2006: Fast low-rank modifications of the thin singular value decomposition**,
    but without separating the basis from rotations.
    """
    # convert input to matrices (no copies of data made if already numpy.ndarray or numpy.matrix)
    S = numpy.asmatrix(S)
    U = numpy.asmatrix(U)
    if V is not None:
        V = numpy.asmatrix(V)
    a = numpy.asmatrix(a).reshape(a.size, 1)
    b = numpy.asmatrix(b).reshape(b.size, 1)

    rank = S.shape[0]

    # eq (6)
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    if float(Ra) < 1e-10:
        logger.debug(
            "input already contained in a subspace of U; skipping update")
        return U, S, V
    P = (1.0 / float(Ra)) * p

    if V is not None:
        # eq (7)
        n = V.T * b
        q = b - V * n
        Rb = numpy.sqrt(q.T * q)
        if float(Rb) < 1e-10:
            logger.debug(
                "input already contained in a subspace of V; skipping update")
            return U, S, V
        Q = (1.0 / float(Rb)) * q
    else:
        n = numpy.matrix(numpy.zeros((rank, 1)))
        Rb = numpy.matrix([[1.0]])

    if float(Ra) > 1.0 or float(Rb) > 1.0:
        logger.debug(
            "insufficient target rank (Ra=%.3f, Rb=%.3f); this update will result in major loss of information"
            % (float(Ra), float(Rb)))

    # eq (8)
    K = numpy.matrix(
        numpy.diag(list(numpy.diag(S)) +
                   [0.0])) + numpy.bmat('m ; Ra') * numpy.bmat('n ; Rb').T

    # eq (5)
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat('U P') * tUp
    if V is not None:
        Vp = numpy.bmat('V Q') * tVp
    else:
        Vp = None
    Sp = tSp

    return Up, Sp, Vp
Example #37
def data():

    np.random.seed(12)

    # First View
    V1_joint = np.bmat([[-1 * np.ones((10, 20))], [np.ones((10, 20))]])

    V1_joint = np.bmat([np.zeros((20, 80)), V1_joint])

    V1_indiv_t = np.bmat([
        [np.ones((4, 50))],
        [-1 * np.ones((4, 50))],
        [np.zeros((4, 50))],
        [np.ones((4, 50))],
        [-1 * np.ones((4, 50))],
    ])

    V1_indiv_b = np.bmat([[np.ones((5, 50))], [-1 * np.ones((10, 50))],
                          [np.ones((5, 50))]])

    V1_indiv_tot = np.bmat([V1_indiv_t, V1_indiv_b])

    V1_noise = np.random.normal(loc=0, scale=1, size=(20, 100))

    # Second View
    V2_joint = np.bmat([[np.ones((10, 10))], [-1 * np.ones((10, 10))]])

    V2_joint = 5000 * np.bmat([V2_joint, np.zeros((20, 10))])

    V2_indiv = 5000 * np.bmat([
        [-1 * np.ones((5, 20))],
        [np.ones((5, 20))],
        [-1 * np.ones((5, 20))],
        [np.ones((5, 20))],
    ])

    V2_noise = 5000 * np.random.normal(loc=0, scale=1, size=(20, 20))

    # View Construction

    V1 = V1_indiv_tot + V1_joint + V1_noise

    V2 = V2_indiv + V2_joint + V2_noise

    # Creating Sparse views
    V1_sparse = np.array(np.zeros_like(V1))
    V2_sparse = np.array(np.zeros_like(V2))
    V1_sparse[0, 0] = 1
    V2_sparse[0, 0] = 3
    V1_Bad = csr_matrix([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
    V2_Bad = csr_matrix([[1, 2, 3], [7, 0, 3], [1, 2, 2]])

    Views_Same = [V1, V1]
    Views_Different = [V1, V2]
    Views_Sparse = [V1_sparse, V2_sparse]
    Views_Bad = [V1_Bad, V2_Bad]

    return {
        "same_views": Views_Same,
        "diff_views": Views_Different,
        "sparse_views": Views_Sparse,
        "bad_views": Views_Bad,
    }
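# Usage sketch for the fixture above; the imports it relies on are added
# here (the original file presumably has them at module level):
import numpy as np
from scipy.sparse import csr_matrix

views = data()
print([v.shape for v in views["diff_views"]])    # [(20, 100), (20, 20)]
print([v.shape for v in views["sparse_views"]])  # [(20, 100), (20, 20)]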
Example #38
0
#  [[1 2]
#  [3 4]]
# a2:
#  [[0 0]
#  [0 0]]
# '''
# # combine the blocks
# m1 = np.bmat('a1 a2;a2 a1')
# '''
# m1:
#  [[1 2 0 0]
#  [3 4 0 0]
#  [0 0 1 2]
#  [0 0 3 4]]
# '''
# print('m1:\n', m1)
# print('m1:\n', type(m1))


# Use bmat to convert a 2-D array into a matrix
# (a1 comes from the part of this example cut off above; reconstructed here
# from the commented output so the snippet runs:)
import numpy as np

a1 = np.array([[1, 2], [3, 4]])
m1 = np.bmat(a1)
print('m1:\n', m1)
print('m1:\n', type(m1))

# Note: bmat cannot turn nested lists of plain scalars or numeric strings
# into a matrix
# m1 = np.bmat('1 2 3;4 5 6;7 8 9') # wrong
# m1 = np.bmat([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) # wrong
# print('m1:\n', m1)
# print('type of m1:\n', type(m1))
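# The two working forms side by side (a short sketch): the string form of
# np.bmat looks block *names* up in the caller's namespace, so it is
# equivalent to passing the same blocks as a nested sequence.
a2 = np.zeros((2, 2), dtype=int)
m_str = np.bmat('a1 a2; a2 a1')         # names resolved from the local scope
m_seq = np.bmat([[a1, a2], [a2, a1]])   # the equivalent nested sequence
print(np.array_equal(m_str, m_seq))     # True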

Example #39
0
def form_update_from_force_direct(form, force):
    r"""Update the form diagram after a modification of the force diagram.

    Compute the geometry of the form diagram from the geometry of the force diagram
    and some constraints (location of fixed points).
    The form diagram is computed by formulating the reciprocal relationships to
    the approach described in AGS. In order to include the constraints, the
    reciprocal force densities and form diagram coordinates are solved for at
    the same time, by formulating the equation system:

    .. math::

        \mathbf{M}\mathbf{X} = \mathbf{r}

    with :math:`\mathbf{M}` containing the coefficients of the system of equations
    including constraints, :math:`\mathbf{X}` the coordinates of the vertices of
    the form diagram and the reciprocal force densities, in *Fortran* order
    (first all :math:`\mathbf{x}`-coordinates, then all :math:`\mathbf{y}`-coordinates, then all reciprocal force
    densities, :math:`\mathbf{q}^{-1}`), and  :math:`\mathbf{r}` contains the residual (all zeroes except
    for the constraint rows).

    The addition of constraints reduces the number of independent edges, which
    must be identified during the solving procedure. Additionally, the algorithm
    fails if any force density is zero (corresponding to a zero-length edge in
    the force diagram) or if it is over-constrained.

    Parameters
    ----------
    form : compas_ags.diagrams.formdiagram.FormDiagram
        The form diagram to update.
    force : compas_bi_ags.diagrams.forcediagram.ForceDiagram
        The force diagram on which the update is based.

    """
    # --------------------------------------------------------------------------
    # form diagram
    # --------------------------------------------------------------------------
    k_i = form.key_index()
    # i_j = {i: [k_i[n] for n in form.vertex_neighbours(k)] for i, k in enumerate(form.vertices())}
    uv_e = form.uv_index()
    ij_e = {(k_i[u], k_i[v]): uv_e[(u, v)] for u, v in uv_e}
    edges = [(k_i[u], k_i[v]) for u, v in form.edges()]
    C = connectivity_matrix(edges, 'array')
    # add opposite edges for convenience...
    ij_e.update({(k_i[v], k_i[u]): uv_e[(u, v)] for u, v in uv_e})
    edges = [(k_i[u], k_i[v]) for u, v in form.edges()]
    xy = array(form.xy(), dtype=float64).reshape((-1, 2))
    q = array(form.q(), dtype=float64).reshape((-1, 1))
    # --------------------------------------------------------------------------
    # force diagram
    # --------------------------------------------------------------------------
    _i_k = {index: key for index, key in enumerate(force.vertices())}
    _xy = array(force.xy(), dtype=float64)
    _edges = force.ordered_edges(form)
    _uv_e = {(_i_k[i], _i_k[j]): e for e, (i, j) in enumerate(_edges)}
    _vcount = force.number_of_vertices()
    _C = connectivity_matrix(_edges, 'array')
    _e_v = force.external_vertices(form)
    _free = list(set(range(_vcount)) - set(_e_v))

    # --------------------------------------------------------------------------
    # compute the coordinates of the form based on the force diagram
    # with linear constraints
    # --------------------------------------------------------------------------

    # Compute dual equilibrium matrix and Laplacian matrix
    import numpy as np
    _E = equilibrium_matrix(_C, _xy, _free, 'array')
    L = laplacian_matrix(edges, normalize=False, rtype='array')

    # Get dual coordinate difference vectors
    _uv = _C.dot(_xy)
    _U = np.diag(_uv[:, 0])
    _V = np.diag(_uv[:, 1])

    # Get reciprocal force densities
    from compas_bi_ags.utilities.errorhandler import SolutionError
    if any(abs(q) < 1e-14):
        raise SolutionError(
            'Found zero force density, direct solution not possible.')
    q = np.divide(1, q)

    # Formulate the equation system
    z = np.zeros(L.shape)
    z2 = np.zeros((_E.shape[0], L.shape[1]))
    M = np.bmat([[L, z, -C.T.dot(_U)], [z, L, -C.T.dot(_V)], [z2, z2, _E]])
    rhs = np.zeros((M.shape[0], 1))
    X = np.vstack((matrix(xy)[:, 0], matrix(xy)[:, 1], matrix(q)[:, 0]))

    # Add constraints
    constraint_rows, res = force.compute_constraints(form, M)
    M = np.vstack((M, constraint_rows))
    rhs = np.vstack((rhs, res))

    # Get independent variables
    from compas_bi_ags.utilities.helpers import get_independent_stress, check_solutions
    nr_free_vars, free_vars, dependent_vars = get_independent_stress(M)
    #k, m  = dof(M)
    #ind   = nonpivots(rref(M))

    # Partition system
    Mid = M[:, free_vars]
    Md = M[:, dependent_vars]
    Xid = X[free_vars]

    # Check that solution exists
    check_solutions(M, rhs)

    # Solve
    Xd = np.asarray(np.linalg.lstsq(Md, rhs - Mid * Xid)[0])
    X[dependent_vars] = Xd

    # Store solution
    nx = xy.shape[0]
    ny = xy.shape[0]
    xy[:, 0] = X[:nx].T
    xy[:, 1] = X[nx:(nx + ny)].T

    # --------------------------------------------------------------------------
    # update
    # --------------------------------------------------------------------------
    uv = C.dot(xy)
    _uv = _C.dot(_xy)
    a = [angle_vectors_xy(uv[i], _uv[i]) for i in range(len(edges))]
    l = normrow(uv)
    _l = normrow(_uv)
    q = _l / l
    # --------------------------------------------------------------------------
    # update form diagram
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['x'] = xy[index, 0]
        attr['y'] = xy[index, 1]
    for u, v, attr in form.edges(True):
        e = uv_e[(u, v)]
        attr['l'] = l[e, 0]
        attr['a'] = a[e]
        if a[e] < 90:
            attr['f'] = _l[e, 0]
            attr['q'] = q[e, 0]
        else:
            attr['f'] = -_l[e, 0]
            attr['q'] = -q[e, 0]
    # --------------------------------------------------------------------------
    # update force diagram
    # --------------------------------------------------------------------------
    for u, v, attr in force.edges(True):
        e = _uv_e[(u, v)]
        attr['a'] = a[e]
        attr['l'] = _l[e, 0]
Example #40
0
# ## 7. numpy.bmat
# Build a matrix object from a string, nested sequence, or array.

# In[18]:


a = np.mat('1 1; 1 1')
b = np.mat('2 2; 2 2')
c = np.mat('3 3; 3 3')
d = np.mat('4 4; 5 5')


# In[19]:


np.bmat([[a,b],[c,d]])


# ## 8. numpy.array
# Create an array.

# In[20]:


hh=np.array([[100,99,98.0],[97,96,0]],dtype=int)
hh


# ## 9. numpy.asarray
# Convert the input to an array.
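# A minimal sketch of the missing code cell for this section (hypothetical
# cell number; np.asarray shares memory with ndarray input, unlike np.array):

# In[21]:


x = np.array([1.0, 2.0, 3.0])
y = np.asarray(x)      # no copy made: y is the same object as x
z = np.array(x)        # np.array copies by default
y is x, z is x         # (True, False)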
Example #41
0
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another.

        The content of `other` is destroyed in the process, so pass this function a
        copy of `other` if you need it further.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError(
                "vector space mismatch: update is using %s features, expected %s"
                % (other.m, self.m))
        logger.info("merging projections: %s + %s", str(self.u.shape),
                    str(other.u.shape))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = np.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= np.dot(self.u, c)

        other.u = [
            other.u
        ]  # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u)  # q, r = QR(component)
        assert not other.u

        # find the rotation that diagonalizes r
        k = np.bmat([[np.diag(decay * self.s),
                      np.multiply(c, other.s)],
                     [
                         matutils.pad(
                             np.array([]).reshape(0, 0), min(m, n2), n1),
                         np.multiply(r, other.s)
                     ]])
        logger.debug("computing SVD of %s dense matrix", k.shape)
        try:
            # in np < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
            # for these early versions of np, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/[email protected]/msg07224.html and
            # bug ticket http://projects.scipy.org/np/ticket/706
            # sdoering: replaced np's linalg.svd with scipy's linalg.svd:

            # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper
            # for partial svd/eigendecomp in np :( //sdoering: maybe there is one in scipy?
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            # if this fails too, give up with an exception
            u_k, s_k, _ = scipy.linalg.svd(np.dot(k, k.T), full_matrices=False)
            s_k = np.sqrt(s_k)  # go back from eigen values to singular values

        k = clip_spectrum(s_k**2, self.k)
        u1_k, u2_k, s_k = np.array(u_k[:n1, :k]), np.array(
            u_k[n1:, :k]), s_k[:k]

        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = np.dot(self.u, u1_k)

        q = ascarray(q, 'q')
        q = np.dot(q, u2_k)
        self.u += q

        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in range(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
Example #42
0
    def __compute_sufficient_statistics_given_observations__(
            self, machine, observations):
        """
    We compute the expected values of the latent variables given the observations
    and parameters of the model.

    First order stats, or the expected value of the latent variables:
      F = (I+A^{T}\Sigma'^{-1}A)^{-1} * A^{T}\Sigma^{-1} (\tilde{x}_{s}-\mu').
    Second order stats:
      S = (I+A^{T}\Sigma'^{-1}A)^{-1} + (F*F^{T}).
    """

        # Get the number of observations
        J_i = observations.shape[0]  # An integer > 0
        dim_d = observations.shape[1]  # A scalar
        # Useful values
        mu = machine.mu
        F = machine.f
        G = machine.g
        sigma = machine.sigma
        isigma = machine.__isigma__
        alpha = machine.__alpha__
        ft_beta = machine.__ft_beta__
        gamma = machine.get_add_gamma(J_i)
        # Normalise the observations
        normalised_observations = observations - numpy.tile(
            mu, [J_i, 1])  # (dim_d, J_i)

        ### Expected value of the latent variables using the scalable solution
        # Identity part first
        sum_ft_beta_part = numpy.zeros(self.m_dim_f)  # (dim_f)
        for j in range(0, J_i):
            current_observation = normalised_observations[j, :]  # (dim_d)
            sum_ft_beta_part = sum_ft_beta_part + numpy.dot(
                ft_beta, current_observation)  # (dim_f)
        h_i = numpy.dot(gamma, sum_ft_beta_part)  # (dim_f)
        # Reproject the identity part to work out the session parts
        Fh_i = numpy.dot(F, h_i)  # (dim_d)
        z_first_order = numpy.zeros((J_i, self.m_dim_f + self.m_dim_g))
        for j in range(0, J_i):
            current_observation = normalised_observations[j, :]  # (dim_d)
            w_ij = numpy.dot(alpha, G.transpose())  # (dim_g, dim_d)
            w_ij = numpy.multiply(w_ij, isigma)  # (dim_g, dim_d)
            w_ij = numpy.dot(w_ij, (current_observation - Fh_i))  # (dim_g)
            z_first_order[j, :] = numpy.hstack([h_i, w_ij])  # (dim_f+dim_g)

        ### Calculate the expected value of the squared of the latent variables
        # The constant matrix we use has the following parts: [top_left, top_right; bottom_left, bottom_right]
        # P             = Inverse_I_plus_GTEG * G^T * Sigma^{-1} * F  (dim_g, dim_f)
        # top_left      = gamma                                       (dim_f, dim_f)
        # bottom_left   = top_right^T = P * gamma                     (dim_g, dim_f)
        # bottom_right  = Inverse_I_plus_GTEG - bottom_left * P^T     (dim_g, dim_g)
        top_left = gamma
        P = numpy.dot(alpha, G.transpose())
        P = numpy.dot(numpy.dot(P, numpy.diag(isigma)), F)
        bottom_left = -1 * numpy.dot(P, top_left)
        top_right = bottom_left.transpose()
        bottom_right = alpha - 1 * numpy.dot(bottom_left, P.transpose())
        constant_matrix = numpy.bmat([[top_left, top_right],
                                      [bottom_left, bottom_right]])

        # Now get the actual expected value
        z_second_order = numpy.zeros(
            (J_i, self.m_dim_f + self.m_dim_g, self.m_dim_f + self.m_dim_g))
        for j in range(0, J_i):
            z_second_order[j, :, :] = constant_matrix + numpy.outer(
                z_first_order[j, :],
                z_first_order[j, :])  # (dim_f+dim_g,dim_f+dim_g)

        ### Return the first and second order statistics
        return (z_first_order, z_second_order)
Example #43
0
clones = {
  'star': [ 'AstarT', 'BstarT', 'CstarT' ]
}

# Load matrices
db = Tools.parseMatrixFile('{}/matrices_{}.xml'.format(cmdLineArgs.matricesDir, numberOfBasisFunctions), clones)
db.update(Tools.parseMatrixFile('{}/matrices_viscoelastic.xml'.format(cmdLineArgs.matricesDir), clones))

clonesQP = {
  'v': [ 'evalAtQP' ],
  'vInv': [ 'projectQP' ]
}
db.update( Tools.parseMatrixFile('{}/plasticity_ip_matrices_{}.xml'.format(cmdLineArgs.matricesDir, order), clonesQP))

# Determine sparsity patterns that depend on the number of mechanisms
riemannSolverSpp = np.bmat([[np.matlib.ones((9, numberOfReducedQuantities), dtype=np.float64)], [np.matlib.zeros((numberOfReducedQuantities-9, numberOfReducedQuantities), dtype=np.float64)]])
db.insert(DB.MatrixInfo('AplusT', numberOfReducedQuantities, numberOfReducedQuantities, matrix=riemannSolverSpp))
db.insert(DB.MatrixInfo('AminusT', numberOfReducedQuantities, numberOfReducedQuantities, matrix=riemannSolverSpp))

DynamicRupture.addMatrices(db, cmdLineArgs.matricesDir, order, cmdLineArgs.dynamicRuptureMethod, numberOfElasticQuantities, numberOfReducedQuantities)
Plasticity.addMatrices(db, cmdLineArgs.matricesDir, cmdLineArgs.PlasticityMethod, order)
SurfaceDisplacement.addMatrices(db, order)

# Load sparse-, dense-, block-dense-config
Tools.memoryLayoutFromFile(cmdLineArgs.memLayout, db, clones)

# Set rules for the global matrix memory order
stiffnessOrder = { 'Xi': 0, 'Eta': 1, 'Zeta': 2 }
vMatrixOrder = { 'v': 0, 'vInv': 1 }
globalMatrixIdRules = [
  (r'^k(Xi|Eta|Zeta)DivMT$', lambda x: stiffnessOrder[x[0]]),
Example #44
0
def lobpcg(A,
           X,
           B=None,
           M=None,
           Y=None,
           tol=None,
           maxiter=20,
           largest=True,
           verbosityLevel=0,
           retLambdaHistory=False,
           retResidualNormsHistory=False):
    """Solve symmetric partial eigenproblems with optional preconditioning

    This function implements the Locally Optimal Block Preconditioned
    Conjugate Gradient Method (LOBPCG).

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The symmetric linear operator of the problem, usually a
        sparse matrix.  Often called the "stiffness matrix".
    X : array_like
        Initial approximation to the k eigenvectors. If A has
        shape=(n,n) then X should have shape=(n,k).
    B : {dense matrix, sparse matrix, LinearOperator}, optional
        the right hand side operator in a generalized eigenproblem.
        by default, B = Identity
        often called the "mass matrix"
    M : {dense matrix, sparse matrix, LinearOperator}, optional
        preconditioner to A; by default M = Identity
        M should approximate the inverse of A
    Y : array_like, optional
        n-by-sizeY matrix of constraints, sizeY < n
        The iterations will be performed in the B-orthogonal complement
        of the column-space of Y. Y must be full rank.

    Returns
    -------
    w : array
        Array of k eigenvalues
    v : array
        An array of k eigenvectors.  V has the same shape as X.

    Other Parameters
    ----------------
    tol : scalar, optional
        Solver tolerance (stopping criterion)
        by default: tol=n*sqrt(eps)
    maxiter : integer, optional
        maximum number of iterations
        by default: maxiter=min(n,20)
    largest : boolean, optional
        when True, solve for the largest eigenvalues, otherwise the smallest
    verbosityLevel : integer, optional
        controls solver output.  default: verbosityLevel = 0.
    retLambdaHistory : boolean, optional
        whether to return eigenvalue history
    retResidualNormsHistory : boolean, optional
        whether to return history of residual norms


    Notes
    -----
    If both retLambdaHistory and retResidualNormsHistory are True, the
    return tuple has the following format
    (lambda, V, lambda history, residual norms history)

    """
    failureFlag = True
    import scipy.linalg as sla

    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    maxIterations = maxiter

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape
    if sizeX > n:
        raise ValueError('X column dimension exceeds the row dimension')

    A = makeOperator(A, (n, n))
    B = makeOperator(B, (n, n))
    M = makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
        # warn('The problem size is small compared to the block size.' \
        #        ' Using dense eigensolver instead of LOBPCG.')

        if blockVectorY is not None:
            raise NotImplementedError('symeig does not support constraints')

        if largest:
            lohi = (n - sizeX, n)
        else:
            lohi = (1, sizeX)

        A_dense = A(np.eye(n))

        if B is not None:
            B_dense = B(np.eye(n))
            _lambda, eigBlockVector = symeig(A_dense, B_dense, select=lohi)
        else:
            _lambda, eigBlockVector = symeig(A_dense, select=lohi)

        return _lambda, eigBlockVector

    if residualTolerance is None:
        residualTolerance = np.sqrt(1e-15) * n

    maxIterations = min(n, maxIterations)

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    ##
    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = sp.dot(blockVectorY.T, blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = sla.cho_factor(gramYBY)
        except:
            raise ValueError('cannot handle linearly dependent constraints')

        applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = sp.dot(blockVectorX.T, blockVectorAX)
    # gramXBX is X^T * X.
    gramXBX = sp.dot(blockVectorX.T, blockVectorX)

    _lambda, eigBlockVector = symeig(gramXAX)
    ii = np.argsort(_lambda)[:sizeX]
    if largest:
        ii = ii[::-1]
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = sp.dot(blockVectorX, eigBlockVector)
    blockVectorAX = sp.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = sp.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
    activeMask = np.ones((sizeX, ), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.
    for iterationNumber in range(maxIterations):
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)

        aux = blockVectorBX * _lambda[np.newaxis, :]
        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            failureFlag = False  # All eigenpairs converged.
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = as2d(blockVectorAP[:, activeMask])
            activeBlockVectorBP = as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            applyConstraints(activeBlockVectorR, gramYBY, blockVectorBY,
                             blockVectorY)

        ##
        # B-orthonormalize the preconditioned residuals.

        aux = b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            aux = b_orthonormalize(B,
                                   activeBlockVectorP,
                                   activeBlockVectorBP,
                                   retInvR=True)
            activeBlockVectorP, activeBlockVectorBP, invR = aux
            activeBlockVectorAP = sp.dot(activeBlockVectorAP, invR)

        ##
        # Perform the Rayleigh Ritz Procedure:
        # Compute symmetric Gram matrices:

        xaw = sp.dot(blockVectorX.T, activeBlockVectorAR)
        waw = sp.dot(activeBlockVectorR.T, activeBlockVectorAR)
        xbw = sp.dot(blockVectorX.T, activeBlockVectorBR)

        if iterationNumber > 0:
            xap = sp.dot(blockVectorX.T, activeBlockVectorAP)
            wap = sp.dot(activeBlockVectorR.T, activeBlockVectorAP)
            pap = sp.dot(activeBlockVectorP.T, activeBlockVectorAP)
            xbp = sp.dot(blockVectorX.T, activeBlockVectorBP)
            wbp = sp.dot(activeBlockVectorR.T, activeBlockVectorBP)

            gramA = np.bmat([[np.diag(_lambda), xaw, xap], [xaw.T, waw, wap],
                             [xap.T, wap.T, pap]])

            gramB = np.bmat([[ident0, xbw, xbp], [xbw.T, ident, wbp],
                             [xbp.T, wbp.T, ident]])
        else:
            gramA = np.bmat([[np.diag(_lambda), xaw], [xaw.T, waw]])
            gramB = np.bmat([[ident0, xbw], [xbw.T, ident]])

        try:
            assert np.allclose(gramA.T, gramA)
        except:
            print(gramA.T - gramA)
            raise

        try:
            assert np.allclose(gramB.T, gramB)
        except:
            print(gramB.T - gramB)
            raise

        if verbosityLevel > 10:
            save(gramA, 'gramA')
            save(gramB, 'gramB')

        ##
        # Solve the generalized eigenvalue problem.
#        _lambda, eigBlockVector = la.eig( gramA, gramB )
        _lambda, eigBlockVector = symeig(gramA, gramB)
        ii = np.argsort(_lambda)[:sizeX]
        if largest:
            ii = ii[::-1]
        if verbosityLevel > 10:
            print(ii)

        _lambda = _lambda[ii].astype(np.float64)
        eigBlockVector = np.asarray(eigBlockVector[:, ii].astype(np.float64))

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)
##         # Normalize eigenvectors!
##         aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 )
##         eigVecNorms = np.sqrt( aux )
##         eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis,:]
#        eigBlockVector, aux = b_orthonormalize( B, eigBlockVector )

        if verbosityLevel > 10:
            print(eigBlockVector)
            pause()

        ##
        # Compute Ritz vectors.
        if iterationNumber > 0:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:sizeX + currentBlockSize]
            eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]

            pp = sp.dot(activeBlockVectorR, eigBlockVectorR)
            pp += sp.dot(activeBlockVectorP, eigBlockVectorP)

            app = sp.dot(activeBlockVectorAR, eigBlockVectorR)
            app += sp.dot(activeBlockVectorAP, eigBlockVectorP)

            bpp = sp.dot(activeBlockVectorBR, eigBlockVectorR)
            bpp += sp.dot(activeBlockVectorBP, eigBlockVectorP)
        else:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:]

            pp = sp.dot(activeBlockVectorR, eigBlockVectorR)
            app = sp.dot(activeBlockVectorAR, eigBlockVectorR)
            bpp = sp.dot(activeBlockVectorBR, eigBlockVectorR)

        if verbosityLevel > 10:
            print(pp)
            print(app)
            print(bpp)
            pause()

        blockVectorX = sp.dot(blockVectorX, eigBlockVectorX) + pp
        blockVectorAX = sp.dot(blockVectorAX, eigBlockVectorX) + app
        blockVectorBX = sp.dot(blockVectorBX, eigBlockVectorX) + bpp

        blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

    aux = blockVectorBX * _lambda[np.newaxis, :]
    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX
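# Usage sketch with SciPy's packaged implementation of the same algorithm
# (scipy.sparse.linalg.lobpcg); the helpers the snippet above relies on
# (symeig, makeOperator, b_orthonormalize, as2d) live in its surrounding
# module and are not shown here.
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg

n, k = 100, 4
A = spdiags(np.arange(1, n + 1, dtype=float), 0, n, n)  # eigenvalues 1..n
rng = np.random.default_rng(0)
X = rng.standard_normal((n, k))          # random initial block
w, v = lobpcg(A, X, largest=True, maxiter=100, tol=1e-8)
print(np.sort(w))  # approximately [97, 98, 99, 100]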
Example #45
0
    def calcDiff(self, data, x, u=None, recalc=True):
        ndx, nu, nv, dt = self.ndx, self.nu, self.nv, self.timeStep
        if recalc:
            self.calc(data, x, u)
        for i in range(4):
            self.differential.calcDiff(data.differential[i],
                                       data.y[i],
                                       u,
                                       recalc=False)
            data.dki_dy[i] = np.bmat([[np.zeros([nv, nv]),
                                       np.identity(nv)],
                                      [data.differential[i].Fx]])

        data.dki_du[0] = np.vstack(
            [np.zeros([nv, nu]), data.differential[0].Fu])

        data.Lx[:] = data.differential[0].Lx
        data.Lu[:] = data.differential[0].Lu

        data.dy_dx[0] = np.identity(nv * 2)
        data.dy_du[0] = np.zeros((ndx, nu))
        data.dki_dx[0] = data.dki_dy[0]

        data.dli_dx[0] = data.differential[0].Lx
        data.dli_du[0] = data.differential[0].Lu

        data.ddli_ddx[0] = data.differential[0].Lxx
        data.ddli_ddu[0] = data.differential[0].Luu
        data.ddli_dxdu[0] = data.differential[0].Lxu

        for i in range(1, 4):
            c = self.rk4_inc[i - 1] * dt
            dyi_dx, dyi_ddx = self.State.Jintegrate(x, c * data.ki[i - 1])

            # ---------Finding the derivative wrt u--------------
            data.dy_du[i] = c * np.dot(dyi_ddx, data.dki_du[i - 1])
            data.dki_du[i] = np.vstack([
                c * data.dki_du[i - 1][nv:, :], data.differential[i].Fu +
                np.dot(data.differential[i].Fx, data.dy_du[i])
            ])

            data.dli_du[i] = data.differential[i].Lu + np.dot(
                data.differential[i].Lx, data.dy_du[i])

            data.Luu_partialx[i] = np.dot(data.differential[i].Lxu.T,
                                          data.dy_du[i])
            data.ddli_ddu[i] = data.differential[i].Luu + data.Luu_partialx[
                i].T + data.Luu_partialx[i] + np.dot(
                    data.dy_du[i].T,
                    np.dot(data.differential[i].Lxx, data.dy_du[i]))

            # ---------Finding the derivative wrt x--------------
            data.dy_dx[i] = dyi_dx + c * np.dot(dyi_ddx, data.dki_dx[i - 1])
            data.dki_dx[i] = np.dot(data.dki_dy[i], data.dy_dx[i])

            data.dli_dx[i] = np.dot(data.differential[i].Lx, data.dy_dx[i])
            data.ddli_ddx[i] = np.dot(
                data.dy_dx[i].T, np.dot(data.differential[i].Lxx,
                                        data.dy_dx[i]))
            data.ddli_dxdu[i] = np.dot(
                data.dy_dx[i].T, data.differential[i].Lxu) + np.dot(
                    data.dy_dx[i].T,
                    np.dot(data.differential[i].Lxx, data.dy_du[i]))

        dxnext_dx, dxnext_ddx = self.State.Jintegrate(x, data.dx)
        ddx_dx = (data.dki_dx[0] + 2. * data.dki_dx[1] + 2. * data.dki_dx[2] +
                  data.dki_dx[3]) * dt / 6
        data.ddx_du = (data.dki_du[0] + 2. * data.dki_du[1] +
                       2. * data.dki_du[2] + data.dki_du[3]) * dt / 6
        data.Fx[:] = dxnext_dx + np.dot(dxnext_ddx, ddx_dx)
        data.Fu[:] = np.dot(dxnext_ddx, data.ddx_du)

        data.Lx[:] = (data.dli_dx[0] + 2. * data.dli_dx[1] +
                      2. * data.dli_dx[2] + data.dli_dx[3]) / 6
        data.Lu[:] = (data.dli_du[0] + 2. * data.dli_du[1] +
                      2. * data.dli_du[2] + data.dli_du[3]) / 6

        data.Lxx[:] = (data.ddli_ddx[0] + 2. * data.ddli_ddx[1] +
                       2. * data.ddli_ddx[2] + data.ddli_ddx[3]) / 6
        data.Luu[:] = (data.ddli_ddu[0] + 2. * data.ddli_ddu[1] +
                       2. * data.ddli_ddu[2] + data.ddli_ddu[3]) / 6
        data.Lxu[:] = (data.ddli_dxdu[0] + 2. * data.ddli_dxdu[1] +
                       2. * data.ddli_dxdu[2] + data.ddli_dxdu[3]) / 6
        data.Lux = data.Lxu.T
Example #46
0
def c_to_r_mat(M):
    # complex to real isomorphism for matrix
    return np.asarray(np.bmat([[M.real, -M.imag], [M.imag, M.real]]))
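# Quick check of the isomorphism: if y = M @ x over the complex numbers,
# then stacking real and imaginary parts gives
# c_to_r_mat(M) @ [x.real; x.imag] = [y.real; y.imag].
import numpy as np

rng = np.random.default_rng(1)
M = rng.standard_normal((3, 3)) + 1j * rng.standard_normal((3, 3))
x = rng.standard_normal(3) + 1j * rng.standard_normal(3)

y = M @ x
y_stacked = c_to_r_mat(M) @ np.concatenate([x.real, x.imag])
print(np.allclose(y_stacked, np.concatenate([y.real, y.imag])))  # True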
Example #47
0
    def eval_reactions_eq(self):
        config = self.config
        t = self.t

        Q_rbs_body_jcs_trans = (-1) * multi_dot([
            np.bmat([[
                Z1x3.T, Z1x3.T,
                multi_dot([
                    A(self.P_rbs_body), self.Mbar_rbs_body_jcs_trans[:, 0:1]
                ]),
                multi_dot([
                    A(self.P_rbs_body), self.Mbar_rbs_body_jcs_trans[:, 1:2]
                ]), Z1x3.T
            ],
                     [
                         multi_dot([
                             B(self.P_rbs_body,
                               self.Mbar_rbs_body_jcs_trans[:, 0:1]).T,
                             A(self.P_ground), self.Mbar_ground_jcs_trans[:,
                                                                          2:3]
                         ]),
                         multi_dot([
                             B(self.P_rbs_body,
                               self.Mbar_rbs_body_jcs_trans[:, 1:2]).T,
                             A(self.P_ground), self.Mbar_ground_jcs_trans[:,
                                                                          2:3]
                         ]),
                         (multi_dot([
                             B(self.P_rbs_body,
                               self.Mbar_rbs_body_jcs_trans[:, 0:1]).T,
                             ((-1) * self.R_ground + multi_dot([
                                 A(self.P_rbs_body),
                                 self.ubar_rbs_body_jcs_trans
                             ]) + (-1) * multi_dot([
                                 A(self.P_ground), self.ubar_ground_jcs_trans
                             ]) + self.R_rbs_body)
                         ]) + multi_dot([
                             B(self.P_rbs_body,
                               self.ubar_rbs_body_jcs_trans).T,
                             A(self.P_rbs_body),
                             self.Mbar_rbs_body_jcs_trans[:, 0:1]
                         ])),
                         (multi_dot([
                             B(self.P_rbs_body,
                               self.Mbar_rbs_body_jcs_trans[:, 1:2]).T,
                             ((-1) * self.R_ground + multi_dot([
                                 A(self.P_rbs_body),
                                 self.ubar_rbs_body_jcs_trans
                             ]) + (-1) * multi_dot([
                                 A(self.P_ground), self.ubar_ground_jcs_trans
                             ]) + self.R_rbs_body)
                         ]) + multi_dot([
                             B(self.P_rbs_body,
                               self.ubar_rbs_body_jcs_trans).T,
                             A(self.P_rbs_body),
                             self.Mbar_rbs_body_jcs_trans[:, 1:2]
                         ])),
                         multi_dot([
                             B(self.P_rbs_body,
                               self.Mbar_rbs_body_jcs_trans[:, 0:1]).T,
                             A(self.P_ground), self.Mbar_ground_jcs_trans[:,
                                                                          1:2]
                         ])
                     ]]), self.L_jcs_trans
        ])
        self.F_rbs_body_jcs_trans = Q_rbs_body_jcs_trans[0:3]
        Te_rbs_body_jcs_trans = Q_rbs_body_jcs_trans[3:7]
        self.T_rbs_body_jcs_trans = ((-1) * multi_dot([
            skew(multi_dot([A(self.P_rbs_body), self.ubar_rbs_body_jcs_trans
                            ])), self.F_rbs_body_jcs_trans
        ]) + (0.5) * multi_dot([E(self.P_rbs_body), Te_rbs_body_jcs_trans]))
        self.F_rbs_body_fas_TSDA = (1.0 / ((multi_dot([
            ((-1) * self.R_ground.T +
             multi_dot([self.ubar_rbs_body_fas_TSDA.T,
                        A(self.P_rbs_body).T]) + (-1) *
             multi_dot([self.ubar_ground_fas_TSDA.T,
                        A(self.P_ground).T]) + self.R_rbs_body.T),
            ((-1) * self.R_ground +
             multi_dot([A(self.P_rbs_body), self.ubar_rbs_body_fas_TSDA]) +
             (-1) * multi_dot([A(self.P_ground), self.ubar_ground_fas_TSDA]) +
             self.R_rbs_body)
        ]))**(1.0 / 2.0))[0] * (config.UF_fas_TSDA_Fd((-1 * 1.0 / ((multi_dot([
            ((-1) * self.R_ground.T +
             multi_dot([self.ubar_rbs_body_fas_TSDA.T,
                        A(self.P_rbs_body).T]) + (-1) *
             multi_dot([self.ubar_ground_fas_TSDA.T,
                        A(self.P_ground).T]) + self.R_rbs_body.T),
            ((-1) * self.R_ground +
             multi_dot([A(self.P_rbs_body), self.ubar_rbs_body_fas_TSDA]) +
             (-1) * multi_dot([A(self.P_ground), self.ubar_ground_fas_TSDA]) +
             self.R_rbs_body)
        ]))**(1.0 / 2.0))[0]) * multi_dot([
            ((-1) * self.R_ground.T +
             multi_dot([self.ubar_rbs_body_fas_TSDA.T,
                        A(self.P_rbs_body).T]) + (-1) *
             multi_dot([self.ubar_ground_fas_TSDA.T,
                        A(self.P_ground).T]) + self.R_rbs_body.T),
            ((-1) * self.Rd_ground + multi_dot([
                B(self.P_rbs_body, self.ubar_rbs_body_fas_TSDA),
                self.Pd_rbs_body
            ]) + (-1) * multi_dot(
                [B(self.P_ground, self.ubar_ground_fas_TSDA), self.Pd_ground])
             + self.Rd_rbs_body)
        ])) + config.UF_fas_TSDA_Fs((config.fas_TSDA_FL + (-1 * ((multi_dot([
            ((-1) * self.R_ground.T +
             multi_dot([self.ubar_rbs_body_fas_TSDA.T,
                        A(self.P_rbs_body).T]) + (-1) *
             multi_dot([self.ubar_ground_fas_TSDA.T,
                        A(self.P_ground).T]) + self.R_rbs_body.T),
            ((-1) * self.R_ground +
             multi_dot([A(self.P_rbs_body), self.ubar_rbs_body_fas_TSDA]) +
             (-1) * multi_dot([A(self.P_ground), self.ubar_ground_fas_TSDA]) +
             self.R_rbs_body)
        ]))**(1.0 / 2.0))[0]))))) * (
            (-1) * self.R_ground +
            multi_dot([A(self.P_rbs_body), self.ubar_rbs_body_fas_TSDA]) +
            (-1) * multi_dot([A(self.P_ground), self.ubar_ground_fas_TSDA]) +
            self.R_rbs_body)
        self.T_rbs_body_fas_TSDA = Z3x1

        self.reactions = {
            'F_rbs_body_jcs_trans': self.F_rbs_body_jcs_trans,
            'T_rbs_body_jcs_trans': self.T_rbs_body_jcs_trans,
            'F_rbs_body_fas_TSDA': self.F_rbs_body_fas_TSDA,
            'T_rbs_body_fas_TSDA': self.T_rbs_body_fas_TSDA
        }
Example #48
0
def makeBlockMat(x):
    # D is taken from the enclosing scope (not shown in this fragment)
    zero = np.zeros([x.shape[0], x.shape[1]])
    blockmat = np.bmat([[D * x, zero], [zero, D * x]])
    return np.array(blockmat)
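# Usage sketch with a hypothetical D (the function picks D up from the
# enclosing scope, which this fragment does not show):
import numpy as np

D = 2.0 * np.eye(3)
x = np.ones((3, 3))
print(makeBlockMat(x).shape)  # (6, 6)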
Example #49
0
def least_squares_directional_derivative_matrix(points, derivative_direction,
                                                a_reg=1e-8, num_neighbors=10,
                                                num_angles=5, num_frequencies=4,
                                                s_min=2, s_max=20):
    N, d = points.shape

    uhat = derivative_direction / np.linalg.norm(derivative_direction)

    T = cKDTree(points)
    neighbor_distances, neighbor_inds = T.query(points, num_neighbors)

    rows = list() # [0,    0,    0,     1,    1,    1,     2,    2,    2,     ... ]
    cols = list() # [p0_n0,p0_n1,p0_n2, p1_n0,p1_n1,p1_n2, p2_n0,p2_n1,p2_n2, ... ]
    values = list()
    for r in range(N): # for each row
        cc = neighbor_inds[r, :] # numpy array of ints. shape = (num_nbrs,)
        rows += [r for _ in range(len(cc))]
        cols += list(cc)

        pr = points[r, :].reshape((1, -1))
        pp_nbrs = points[cc, :] # numpy array of floats. shape = (num_nbrs, spatial_dimension)
        dd_nbrs = neighbor_distances[r, :] # numpy array of floats. shape = (num_nbrs,)
        local_pointcloud_diameter = 2.0 * np.max(dd_nbrs)
        min_L = s_min * local_pointcloud_diameter
        max_L = s_max * local_pointcloud_diameter

        theta0 = np.arctan(uhat[1] / uhat[0])
        thetas = theta0 + np.linspace(0, 2.*np.pi, num_angles, endpoint=False)
        vhats = np.zeros((num_angles, d))
        vhats[:, 0] = np.cos(thetas)
        vhats[:, 1] = np.sin(thetas)

        omegas = 1. / np.linspace(min_L, max_L, num_frequencies)

        X = np.zeros((num_neighbors, num_angles, num_frequencies, 2))
        DXr = np.zeros((num_angles, num_frequencies,2))
        for ii in range(num_angles):
            for jj in range(num_frequencies):
                vhat = vhats[ii,:]
                omega = omegas[jj]
                X_ij = complex_exponential(vhat, omega, pp_nbrs) / omega
                X[:, ii, jj, 0] = X_ij.real
                X[:, ii, jj, 1] = X_ij.imag

                DXr_ij = directional_derivative_of_complex_exponential(vhat, omega, pr, derivative_direction) / omega
                DXr[ii, jj, 0] = DXr_ij.real
                DXr[ii, jj, 1] = DXr_ij.imag

        X = X.reshape((num_neighbors, -1))
        DXr = DXr.reshape(-1)

        A = np.bmat([[X.T], [a_reg*np.eye(len(cc))]])
        b = np.concatenate([DXr, np.zeros(len(cc))])

        q = np.linalg.lstsq(A, b, rcond=None)[0]  # min 0.5*||q^T*X - DXr||^2 + a_reg*0.5*||q||^2
        values += list(q)

    rows = np.array(rows)
    cols = np.array(cols)
    values = np.array(values)

    D = sps.coo_matrix((values, (rows, cols)), shape=(N, N)).tocsr()

    return D
Example #50
0
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another. 
        
        The content of `other` is destroyed in the process, so pass this function a 
        copy of `other` if you need it further.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            if other.s is None:
                # other.u contains a direct document chunk, not svd => perform svd
                docs = other.u
                assert scipy.sparse.issparse(docs)
                if self.m * self.k < 10000:
                    # SVDLIBC gives spurious results for small matrices.. run full
                    # LAPACK on them instead
                    logger.info("computing dense SVD of %s matrix" %
                                str(docs.shape))
                    u, s, vt = numpy.linalg.svd(docs.todense(),
                                                full_matrices=False)
                else:
                    try:
                        import sparsesvd
                    except ImportError:
                        raise ImportError(
                            "for LSA, the `sparsesvd` module is needed but not found; run `easy_install sparsesvd`"
                        )
                    logger.info("computing sparse SVD of %s matrix" %
                                str(docs.shape))
                    ut, s, vt = sparsesvd.sparsesvd(
                        docs, self.k + 30
                    )  # ask for a few extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
                    u = ut.T
                    del ut
                del vt
                k = clipSpectrum(s**2, self.k)
                self.u = u[:, :k].copy('F')
                self.s = s[:k]
            else:
                self.u = other.u.copy('F')
                self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError(
                "vector space mismatch: update has %s features, expected %s" %
                (other.m, self.m))
        logger.info("merging projections: %s + %s" %
                    (str(self.u.shape), str(other.u.shape)))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        if other.s is None:
            other.u = other.u.todense()
            other.s = 1.0  # broadcasting will promote this to eye(n2) where needed
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        # IMPORTANT: keep matrices in memory suitable order for matrix products; failing to do so gives 8x lower performance :(
        self.u = numpy.asfortranarray(
            self.u)  # does nothing if input already fortran-order array
        other.u = numpy.asfortranarray(other.u)
        gemm = matutils.blas('gemm', self.u)
        logger.debug("constructing orthogonal component")
        c = gemm(1.0, self.u, other.u, trans_a=True)
        gemm(-1.0, self.u, c, beta=1.0, c=other.u, overwrite_c=True)

        # perform q, r = QR(component); code hacked out of scipy.linalg.qr
        logger.debug("computing QR of %s dense matrix" % str(other.u.shape))
        geqrf, = get_lapack_funcs(('geqrf', ), (other.u, ))
        qr, tau, work, info = geqrf(other.u, lwork=-1, overwrite_a=True)
        qr, tau, work, info = geqrf(other.u, lwork=work[0], overwrite_a=True)
        del other.u
        assert info >= 0
        r = triu(qr[:n2, :n2])
        if m < n2:  # rare case, #features < #topics
            qr = qr[:, :m]  # retains fortran order
        gorgqr, = get_lapack_funcs(('orgqr', ), (qr, ))
        q, work, info = gorgqr(qr, tau, lwork=-1, overwrite_a=True)
        q, work, info = gorgqr(qr, tau, lwork=work[0], overwrite_a=True)
        assert info >= 0, "qr failed"
        assert q.flags.f_contiguous

        # find the rotation that diagonalizes r
        k = numpy.bmat([[numpy.diag(decay * self.s), c * other.s],
                        [
                            matutils.pad(
                                numpy.matrix([]).reshape(0, 0), min(m, n2),
                                n1), r * other.s
                        ]])
        logger.debug("computing SVD of %s dense matrix" % str(k.shape))
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/[email protected]/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            u_k, s_k, _ = numpy.linalg.svd(
                k, full_matrices=False
            )  # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :(
        except numpy.linalg.LinAlgError:
            logging.error("SVD(A) failed; trying SVD(A * A^T)")
            u_k, s_k, _ = numpy.linalg.svd(
                numpy.dot(k, k.T),
                full_matrices=False)  # if this fails too, give up
            s_k = numpy.sqrt(s_k)

        k = clipSpectrum(s_k**2, self.k)
        u_k, s_k = u_k[:, :k], s_k[:k]

        # update & rotate current basis U
        logger.debug("updating orthonormal basis U")
        self.u = gemm(
            1.0, self.u, u_k[:n1]
        )  # TODO temporarily creates an extra (m,k) dense array in memory. find a way to avoid this!
        gemm(1.0, q, u_k[n1:], beta=1.0, c=self.u,
             overwrite_c=True)  # u = [u,u']*u_k
        self.s = s_k
Example #51
0
    def __init__(self, x_pos, x_neg, p='inf', λ=1):
        """
            x_pos ∈ R^{n,m_pos}
            x_neg ∈ R^{n,m_neg}
        """
        n = x_pos.shape[0]
        m_pos = x_pos.shape[1]
        m_neg = x_neg.shape[1]
        assert x_neg.shape[0] == n
        if p == 1:
            Q = None
            c = np.bmat([[zeros(n + 1)], [λ * ones(m_pos + m_neg)], [ones(n)]])
            A_ub =  np.bmat([\
                    [ -x_pos.T, ones(m_pos), -eye(m_pos), zeros(m_pos,m_neg), zeros(m_pos,n) ],\
                    [ x_neg.T, -ones(m_neg), zeros(m_neg,m_pos), -eye(m_neg), zeros(m_neg,n) ],\
                    [ -eye(n), zeros(n), zeros(n,m_pos), zeros(n,m_neg), -eye(n) ],\
                    [ eye(n), zeros(n), zeros(n,m_pos), zeros(n,m_neg), -eye(n) ],\
                    [ zeros(m_pos,n), zeros(m_pos), -eye(m_pos), zeros(m_pos,m_neg), zeros(m_pos,n) ],\
                    [ zeros(m_neg,n), zeros(m_neg), zeros(m_neg,m_pos), -eye(m_neg), zeros(m_neg,n) ]])
            b_ub =  np.bmat([\
                    [ -ones(m_pos) ],\
                    [ -ones(m_neg) ],\
                    [ zeros(n) ],\
                    [ zeros(n) ],\
                    [ zeros(m_pos) ],\
                    [ zeros(m_neg) ]])
            P = poly(A_ub, b_ub)
            sol = QP(Q, c, P).solve()
            self.a = sol[0:n, 0]
            self.b = sol[n, 0]
        if p == 2:
            Q = zeros(n + 1 + m_pos + m_neg, n + 1 + m_pos + m_neg)
            Q[0:n, 0:n] = 2 * eye(n)
            c = zeros(n + 1 + m_pos + m_neg, 1)
            c[(n + 1 + 1 - 1):(n + 1 + m_pos +
                               m_neg)] = λ * ones(m_pos + m_neg, 1)

            A_ub =  np.bmat([\
                    [ -x_pos.T, ones(m_pos), -eye(m_pos), zeros(m_pos,m_neg) ],\
                    [ x_neg.T, -ones(m_neg), zeros(m_neg,m_pos), -eye(m_neg) ],\
                    [ zeros(m_pos,n), zeros(m_pos), -eye(m_pos), zeros(m_pos,m_neg) ],\
                    [ zeros(m_neg,n), zeros(m_neg), zeros(m_neg,m_pos), -eye(m_neg) ]])
            b_ub = np.bmat([\
                    [ -ones(m_pos) ],\
                    [ -ones(m_neg) ],\
                    [ zeros(m_pos) ],\
                    [ zeros(m_neg) ]\
                    ])
            P = poly(A_ub, b_ub)
            sol = QP(Q, c, P).solve()
            self.a = sol[0:n, 0]
            self.b = sol[n, 0]
        if p == 'inf':
            Q = None
            c = np.bmat([[zeros(n + 1)], [λ * ones(m_pos + m_neg)], [ones(1)]])
            A_ub =  np.bmat([\
                    [ -x_pos.T, ones(m_pos), -eye(m_pos), zeros(m_pos,m_neg), zeros(m_pos) ],\
                    [ x_neg.T, -ones(m_neg), zeros(m_neg,m_pos), -eye(m_neg), zeros(m_neg) ],\
                    [ -eye(n), zeros(n), zeros(n,m_pos), zeros(n,m_neg), -ones(n) ],\
                    [ eye(n), zeros(n), zeros(n,m_pos), zeros(n,m_neg), -ones(n) ],\
                    [ zeros(m_pos,n), zeros(m_pos), -eye(m_pos), zeros(m_pos,m_neg), zeros(m_pos) ],\
                    [ zeros(m_neg,n), zeros(m_neg), zeros(m_neg,m_pos), -eye(m_neg), zeros(m_neg) ]])
            b_ub = np.bmat([\
                    [ -ones(m_pos) ],\
                    [ -ones(m_neg) ],\
                    [ zeros(n) ],\
                    [ zeros(n) ],\
                    [ zeros(m_pos) ],\
                    [ zeros(m_neg) ]])
            P = poly(A_ub, b_ub)
            sol = QP(Q, c, P).solve()
            self.a = sol[0:n, 0]
            self.b = sol[n, 0]
Example #52
0
    def hueckel(self, orbe0, Sh, Sv, H0h, H0v):
        """
        perform a Hueckel calculation using the matrix elements passed as arguments

        Parameters:
        ===========
        orbe0: energies of basis orbitals
        Sh: overlap between orbitals on neighbouring horizontally fused porphyrins
        Sv: overlap between orbitals on neighbouring vertically fused porphyrins
        H0h: matrix elements between orbitals on neighbouring horizontally fused porphyrins
        H0v: matrix elements between orbitals on neighbouring vertically fused porphyrins

        Results:
        ========
        en_tot, HLgap: total pi-energy and HOMO-LUMO gap in Hartree
        orbe, orbs: orbe[i] is the Hueckel energy belonging to the orbital with the coefficients
           orbs[:,i]
        """
        # arrange the cells of the polyomino (or flake, we'll use the terms interchangeably)
        # on a linear grid. This defines a mapping (i,j) -> k
        n, m = self.grid.shape
        k = 0
        grid2chain = {}  # mapping from 2D grid to linear chain
        chain2grid = {}  # inverse mapping
        for i in range(0, n):
            for j in range(0, m):
                if self.grid[i, j] == 1:
                    # occupied
                    grid2chain[(i, j)] = k
                    chain2grid[k] = (i, j)
                    k += 1
        # number of monomers in the flake == number of 1s in the grid
        N = np.sum(self.grid[self.grid == 1])
        # each monomer contributes 4 valence electrons in the 2 orbitals a1u and a2u
        nelec = 4 * N
        assert k == np.sum(self.grid[self.grid == 1])
        # construct matrix elements for the whole flake from the matrix elements between two porphyrins
        norb = Sh.shape[0]  # number of orbitals per porphyrin
        Zero = np.zeros((norb, norb))
        # list of block matrices
        S = [[Zero for l in range(0, N)] for k in range(0, N)]
        H0 = [[Zero for l in range(0, N)] for k in range(0, N)]
        for k in range(0, N):
            (i1, j1) = chain2grid[k]
            for l in range(0, N):
                (i2, j2) = chain2grid[l]
                if k == l:
                    # diagonal elements are orbital energies
                    S[k][l] = np.eye(norb)
                    H0[k][l] = np.diag(orbe0)
                else:
                    # matrix elements between orbitals on porphyrin (i1,j1) and (i2,j2)
                    # are non-zero only if they are nearest neighbours
                    if i2 - i1 == 1 and j2 - j1 == 0:
                        # horizontal nearest neighbours: #-#
                        S[k][l] = Sh
                        H0[k][l] = H0h
                    elif i2 - i1 == 0 and j2 - j1 == 1:
                        # vertical nearest neighbours: #
                        #                              |
                        #                              #
                        S[k][l] = Sv
                        H0[k][l] = H0v
        #
        S = np.bmat(S)
        H0 = np.bmat(H0)
        # make matrices hermitian by adding symmetric matrix elements, that were skipped above
        S = 0.5 * (S + S.transpose())
        H0 = 0.5 * (H0 + H0.transpose())
        # Hueckel energies and coefficients
        orbe, orbs = sla.eigh(H0, S)
        # fill the orbitals of lowest energy with 2 electrons each according to the Aufbau principle
        HOMO = nelec // 2 - 1
        print("number of porphyrin monomers N = %d" % N)
        print("number of electron pairs = %d" % (nelec // 2))
        print("index of HOMO = %d" % HOMO)
        LUMO = HOMO + 1
        en_tot = 2.0 * np.sum(
            orbe[:HOMO + 1])  # sum over energies of doubly occupied orbitals
        HLgap = orbe[LUMO] - orbe[HOMO]
        print("--------------------------------------------------")
        print(self)
        print("Hueckel total energy: %8.6f eV" %
              (en_tot * AtomicData.hartree_to_eV))
        print("Hueckel total energy/site: %8.6f" %
              ((en_tot / float(N)) * AtomicData.hartree_to_eV))
        print("Hueckel H**O: %8.6f eV" %
              (orbe[H**O] * AtomicData.hartree_to_eV))
        print("Hueckel LUMO: %8.6f eV" %
              (orbe[LUMO] * AtomicData.hartree_to_eV))
        print("Hueckel H**O-LUMO gap: %8.6f eV" %
              (HLgap * AtomicData.hartree_to_eV))
        print("--------------------------------------------------")
        # save additional data as member variables for later use
        # mapping from k to (i,j)
        self.chain2grid = chain2grid
        # indexes of HOMO and LUMO
        self.HOMO = HOMO
        self.LUMO = LUMO
        # number of orbitals per site
        self.norb = norb
        # number of sites
        self.N = N

        return en_tot, HLgap, orbe, orbs
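
The assembly above reduces, for a dimer with one orbital per site, to a 2x2 block problem. A self-contained sketch of the same pattern, with made-up values for orbe0, Sh and H0h (all hypothetical, for illustration only):

import numpy as np
import scipy.linalg as sla

orbe0 = np.array([-0.2])   # hypothetical monomer orbital energy (Hartree)
Sh = np.array([[0.1]])     # hypothetical neighbour overlap
H0h = np.array([[-0.05]])  # hypothetical neighbour coupling

Zero = np.zeros((1, 1))
S = np.array(np.bmat([[np.eye(1), Sh], [Zero, np.eye(1)]]))
H0 = np.array(np.bmat([[np.diag(orbe0), H0h], [Zero, np.diag(orbe0)]]))
# symmetrize as above, then solve the generalized problem H0 C = S C diag(orbe)
S = 0.5 * (S + S.T)
H0 = 0.5 * (H0 + H0.T)
orbe, orbs = sla.eigh(H0, S)  # a bonding/antibonding pair split around orbe0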
Example #53
0
# Does the Buys-Ballot decomposition.
y = d[:, 1]
m1 = np.ones(len(y))
m2 = np.arange(1, len(y) + 1)
s = (len(y), 365)
S = np.zeros(s)
for t in range(len(y)):
    S[t, int(np.rint(t - np.floor((t + 1) / 365.24) * 365.24) % 365)] = 1
for j in range(364):
    S[:, j] = S[:, j] - S[:, 364]

M = np.array((m1, m2)).T
S = np.delete(S, 364, 1)

mat1 = np.bmat([[np.dot(M.T, M), np.dot(M.T, S)],
                [np.dot(S.T, M), np.dot(S.T, S)]])
mat2 = np.concatenate((np.dot(M.T, y), np.dot(S.T, y)), axis=0)
matFinal = np.dot(np.linalg.inv(mat1), mat2)
matFinal = np.array(matFinal)[0]

y_trend = matFinal[0] * m1 + matFinal[1] * m2
y_season = np.dot(S, matFinal[2:])
y_res = y - y_trend - y_season
y_pred = y - y_res

# Exports a csv file of the predictions, because of reasons.

L_trend = np.array([y_trend.tolist()])
L_season = np.array([y_season.tolist()])
L_res = np.array([y_res.tolist()])
L_pred = np.array([y_pred.tolist()])
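
For reference, the block normal equations above give the same coefficients as a single least-squares fit on the stacked design matrix [M S] (when it has full column rank), which avoids forming and inverting mat1 explicitly. A sketch with synthetic data:

import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(size=730)            # two synthetic "years" of daily data
m1 = np.ones(len(y))
m2 = np.arange(1, len(y) + 1)
S = np.zeros((len(y), 365))
for t in range(len(y)):
    S[t, int(np.rint(t - np.floor((t + 1) / 365.24) * 365.24) % 365)] = 1
S[:, :364] -= S[:, [364]]
M = np.array((m1, m2)).T
S = np.delete(S, 364, 1)
X = np.hstack((M, S))
coef, *_ = np.linalg.lstsq(X, y, rcond=None)  # same coefficients, better conditioned
y_trend = X[:, :2] @ coef[:2]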
Example #54
0
def bmat(*args, **kwargs):
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            'ignore', '.*the matrix subclass is not the recommended way.*')
        return np.bmat(*args, **kwargs)
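
A quick usage sketch, assuming the wrapper above is in scope; it accepts the same arguments as np.bmat but silences the matrix-subclass deprecation warning:

import numpy as np

big = bmat([[np.eye(2), np.zeros((2, 2))],
            [np.zeros((2, 2)), 2 * np.eye(2)]])  # 4x4 block-diagonal np.matrix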
Example #55
0
# Method 1:
A = np.mat([[1,0,1,0],[-1,2,0,1],[1,0,4,1],[-1,-1,2,0]])
print(A)
# Method 2:
B = np.mat('1 0 0 0;0 1 0 0; -1 2 1 0;1 1 0 1')
print(B)

# 1. The matrix function
# np.matrix

# 3. The bmat function: build a large matrix out of blocks.
A =  np.mat([[1,1],[1,1]])
B =  np.mat([[2,2],[2,2]])
C =  np.mat([[3,3],[3,3]])
D =  np.mat([[4,4],[4,4]])
big_mat = np.bmat([[A,B],[C,D]])
print(big_mat)
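# expected output:
# [[1 1 2 2]
#  [1 1 2 2]
#  [3 3 4 4]
#  [3 3 4 4]]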


# Matrix operations
A = np.mat([[1,1],[1,1]])
B = np.mat([[2,2],[2,2]])
print(A*3)
print(A+B)

# A = np.mat([[1,2,3,4],[2,0,1,5]])
# B = np.mat([[1,0,2],[-1,1,3],[4,1,0],[2,3,4]])
# C = A*B
# print(C)

result = np.multiply(A,B)  # element-wise multiplication
Example #56
0
# this is hiding a lot of stuff!
phys, ext = boundary.find_interior_points(full_grid)
phys = full_grid.reshape(phys)
ext = full_grid.reshape(ext)

################################################################################
# solve for the density

DLP = Stokes_Layer_Singular_Form(boundary, ifdipole=True)
A = -0.5*np.eye(2*boundary.N) + DLP
# fix the nullspace
Nxx = boundary.normal_x[:,None]*boundary.normal_x*boundary.weights
Nxy = boundary.normal_x[:,None]*boundary.normal_y*boundary.weights
Nyx = boundary.normal_y[:,None]*boundary.normal_x*boundary.weights
Nyy = boundary.normal_y[:,None]*boundary.normal_y*boundary.weights
NN = np.bmat([[Nxx, Nxy], [Nyx, Nyy]])
A += NN
tau = np.linalg.solve(A, bc)

################################################################################
# naive evaluation

# generate a target for the physical grid
gridp = Grid([-2,2], N, [-2,2], N, mask=phys)

# evaluate at the target points
u = np.zeros_like(gridp.xg)
v = np.zeros_like(gridp.xg)
Up = Stokes_Layer_Apply(boundary, gridp, dipstr=tau, backend='FMM',
                        out_type='stacked')
up = Up[0]
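
The NN term above looks like the standard rank-completion trick for a singular boundary-integral operator: adding an outer product of the known nullspace direction (here built from the boundary normals and quadrature weights) makes the system invertible without disturbing the physical solution. A toy sketch of that idea, with a made-up 2x2 singular A:

import numpy as np

nvec = np.array([1.0, 1.0]) / np.sqrt(2)   # known nullspace direction
A = np.array([[1.0, -1.0], [-1.0, 1.0]])   # singular: A @ nvec == 0
b = np.array([1.0, -1.0])                  # b orthogonal to nvec, so solvable
x = np.linalg.solve(A + np.outer(nvec, nvec), b)  # picks the solution with no nvec component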
Example #57
0
    def poison_data(self, poisx, poisy, tstart, visualize, newlogdir):
        """
        poison_data takes an initial set of poisoning points and optimizes it
        using gradient descent with parameters set in __init__

        poisx, poisy: initial poisoning points
        tstart: start time - used for writing out performance
        visualize: whether we want to visualize the gradient descent steps
        newlogdir: directory to log into, to save the visualization
        """

        poisct = poisx.shape[0]
        print("Poison Count: {}".format(poisct))

        new_poisx = np.zeros(poisx.shape)
        new_poisy = [None for a in poisy]

        if visualize:
            # initialize poisoning histories
            poisx_hist = np.zeros((10, poisx.shape[0], poisx.shape[1]))
            poisy_hist = np.zeros((10, poisx.shape[0]))

            # store first round
            poisx_hist[0] = poisx[:]
            poisy_hist[0] = np.array(poisy)

        best_poisx = np.zeros(poisx.shape)
        best_poisy = [None for a in poisy]

        best_obj = 0
        last_obj = 0
        count = 0

        if self.mp:
            import multiprocessing as mp
            workerpool = mp.Pool(max(1, mp.cpu_count() // 2 - 1))
        else:
            workerpool = None

        sig = self.compute_sigma()  # can already compute sigma and mu
        mu = self.compute_mu()  # as x_c does not change them
        eq7lhs = np.bmat([[sig, np.transpose(mu)], [mu, np.matrix([1])]])
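        # eq7lhs is the bordered matrix [[sigma, mu^T], [mu, 1]] -- the
        # left-hand side of the linear system (eq. 7 of the underlying paper,
        # per the variable name) later solved for the parameter derivatives.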

        # initial model - used in visualization
        clf_init, lam_init = self.learn_model(self.trnx, self.trny, None)
        clf, lam = clf_init, lam_init

        # figure out starting error
        it_res = self.iter_progress(poisx, poisy, poisx, poisy)

        print("Iteration {}:".format(count))
        print("Objective Value: {} Change: {}".format(it_res[0], it_res[0]))
        print("Validation MSE: {}".format(it_res[2][0]))
        print("Test MSE: {}".format(it_res[2][1]))

        last_obj = it_res[0]
        if it_res[0] > best_obj:
            best_poisx, best_poisy, best_obj = poisx, poisy, it_res[0]

        # stuff to put into self.resfile
        towrite = [
            poisct, count, it_res[0], it_res[1], it_res[2][0], it_res[2][1],
            (datetime.datetime.now() - tstart).total_seconds()
        ]

        self.resfile.write(','.join([str(val) for val in towrite]) + "\n")
        self.trainfile.write("\n")
        self.trainfile.write(str(poisct) + "," + str(count) + '\n')

        if visualize:
            self.trainfile.write('{},{}\n'.format(poisy[0], new_poisx[0]))
        else:
            for j in range(poisct):
                self.trainfile.write(','.join(
                    [str(val) for val
                     in [poisy[j]] + poisx[j].tolist()[0] \
                     ]) + '\n')

        # main work loop
        while True:
            count += 1
            new_poisx = np.matrix(np.zeros(poisx.shape))
            new_poisy = [None for a in poisy]
            x_cur = np.concatenate((self.trnx, poisx), axis=0)
            y_cur = self.trny + poisy

            clf, lam = self.learn_model(x_cur, y_cur, None)
            pois_params = [(poisx[i], poisy[i], eq7lhs, mu, clf, lam) \
                           for i in range(poisct)]
            outofboundsct = 0

            if workerpool:  # multiprocessing
                for i, cur_pois_res in enumerate(
                        workerpool.map(self.poison_data_subroutine,
                                       pois_params)):
                    new_poisx[i] = cur_pois_res[0]
                    new_poisy[i] = cur_pois_res[1]
                    outofboundsct += cur_pois_res[2]

            else:
                for i in range(poisct):
                    cur_pois_res = self.poison_data_subroutine(pois_params[i])

                    new_poisx[i] = cur_pois_res[0]
                    new_poisy[i] = cur_pois_res[1]
                    outofboundsct += cur_pois_res[2]

            if visualize:
                poisx_hist[count] = new_poisx[:]
                poisy_hist[count] = np.array(new_poisy).ravel()

            it_res = self.iter_progress(poisx, poisy, new_poisx, new_poisy)

            print("Iteration {}:".format(count))
            print("Objective Value: {} Change: {}".format(
                it_res[0], it_res[0] - it_res[1]))

            print("Validation MSE: {}".format(it_res[2][0]))
            print("Test MSE: {}".format(it_res[2][1]))
            print("Y pushed out of bounds: {}/{}".format(
                outofboundsct, poisct))

            # if we don't make progress, decrease learning rate
            if it_res[0] < it_res[1]:
                print("no progress")
                self.eta *= 0.75
                new_poisx, new_poisy = poisx, poisy
            else:
                poisx = new_poisx
                poisy = new_poisy

            if it_res[0] > best_obj:
                best_poisx, best_poisy, best_obj = poisx, poisy, it_res[0]

            last_obj = it_res[1]

            towrite = [poisct, count, it_res[0], it_res[1] - it_res[0], \
                       it_res[2][0], it_res[2][1], \
                       (datetime.datetime.now() - tstart).total_seconds()]

            self.resfile.write(','.join([str(val) for val in towrite]) + "\n")
            self.trainfile.write("\n{},{}\n".format(poisct, count))

            for j in range(poisct):
                self.trainfile.write(','.join([
                    str(val)
                    for val in [new_poisy[j]] + new_poisx[j].tolist()[0]
                ]) + '\n')
            it_diff = abs(it_res[0] - it_res[1])

            # stopping conditions
            if count >= 15 and (it_diff <= self.eps or count > 50):
                break

            # visualization done - plotting time
            if visualize and count >= 9:
                self.plot_path(clf_init, lam_init, eq7lhs, mu, poisx_hist,
                               poisy_hist, newlogdir)
                break

        if workerpool:
            workerpool.close()

        return best_poisx, best_poisy
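
For context, a minimal sketch of the bordered block system that eq7lhs above assembles; sig, mu and the right-hand side here are made-up stand-ins, so this only illustrates the block structure, not the attack itself:

import numpy as np

d = 3
sig = np.eye(d)                       # stand-in for the data covariance
mu = np.ones((1, d)) / d              # stand-in for the feature means
eq7lhs = np.array(np.bmat([[sig, mu.T], [mu, np.ones((1, 1))]]))
rhs = np.ones(d + 1)                  # hypothetical right-hand side
dwdb = np.linalg.solve(eq7lhs, rhs)   # stacked derivatives (d weights + 1 bias)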
Example #58
0
    def calcCpPosition(self, filenames, position_range, sigma, mprior):
        '''
        Calculate the uncertainties of the predictions deriving from uncertainties in the fault position.
        From Ragon et al. (2018) GJI
        
        :Args:
            * filenames : array with strings, corresponding to name of the Green's Functions files
                    ex: ['GF_strike_-1.txt','GF_strike_0.txt','GF_strike_1.txt']
                    If there are several fault segments (i.e. multifault is True), len(filenames) must be equal to the number of segments
                    ex: [['GF_seg1_strike_-1.txt','GF_seg1_strike_0.txt','GF_seg1_strike_1.txt'],['GF_seg2_strike_-2.txt','GF_strike_dip_-1.txt','GF_seg2_strike_0.txt','GF_seg2_strike_1.txt']]
            * position_range : difference between initial position and the ones used to calculate Green's Functions (in km).
                    Positive values are +90 degrees from the prior strike
                    ex: position_range = [-1,0,1,2]
                    If there are several fault segments (i.e. multifault is True), len(position_range) must be equal to the number of segments
                    ex: position_range = [[-1,0,1,2],[-2,-1,0,1,2,3],[-1.5,-1,-0.5,0],[0,1,2]] for a fault with 4 segments
            * sigma : prior uncertainty (standard deviation) in the fault position (in km)
                    ex: sigma = 5
                    If there are several fault segments (e.g. multifault is True), len(sigma) must be equal to len(filenames)
                    ex: sigma = [1,0.5,1,0.5] for a fault with 4 segments
            * mprior : initial model used to calculate Cp
                    length must be equal to two times the number of patches
                    can be uniform and derived from Mo, ex: meanslip = -Mo*10**(-7)/(3*10**10*length*width)
                    OR derived from a first inversion without accounting for uncertainties
                   
        :Returns:
            * CpPosition
        '''
        # For a fault with one segment
        if self.multifault is False:
            # Read GFs
            gfs = []
            for i in range(len(filenames)):
                gf = np.fromfile(os.path.join(self.gfdir, filenames[i]))
                gf = np.reshape(gf, (len(gf) // self.Np, self.Np))
                gfs.append(gf)

            # Calculate the sensitivity Kernels of the Green's Functions (the derivatives) by linearizing their variation
            slope = np.empty(gfs[0].shape)
            rvalue = np.empty(gfs[0].shape)
            pvalue = np.empty(gfs[0].shape)
            stderr = np.empty(gfs[0].shape)
            inter = np.empty(gfs[0].shape)
            coeff = []
            for p in position_range:
                if p < 0:
                    position_vals = [p, 0]
                elif p == 0:
                    continue
                else:
                    position_vals = [0, p]
                for i in range(gfs[0].shape[0]):
                    for j in range(gfs[0].shape[1]):
                        # Do a linear regression for each couple parameter/data
                        slope[i, j], inter[i, j], rvalue[i, j], pvalue[
                            i, j], stderr[i, j] = linregress(
                                position_vals, [
                                    gfs[k][i, j] for k in [
                                        position_range.index(position_vals[0]),
                                        position_range.index(position_vals[1])
                                    ]
                                ])
                coeff.append(slope.copy())  # copy: slope is overwritten on the next pass

            # Select the maximum coefficient
            Kpos = np.max(coeff, axis=0)
            # Build the Covariance matrix
            K = []
            K.append(Kpos)
            kernels = np.asarray(K)
            k = np.transpose(np.matmul(kernels, mprior))
            C1 = np.matmul(k, [[float(sigma)**2]])
            CpPosition = np.matmul(C1, np.transpose(k))

            self.KPosition = kernels
            self.CovPosition = np.array([[float(sigma)**2]])
            if self.KernelsFull == []:
                self.KernelsFull = self.KPosition
            else:
                self.KernelsFull = np.concatenate(
                    (self.KernelsFull, self.KPosition))
            if self.CovFull == []:
                self.CovFull = self.CovPosition
            else:
                Z = np.zeros(
                    (np.shape(self.CovFull)[0], np.shape(self.CovPosition)[0]),
                    dtype=int)
                self.CovFull = np.asarray(
                    np.bmat([[self.CovFull, Z], [Z.T, self.CovPosition]]))

            self.CpPosition = CpPosition
            if self.CpFull == []:
                self.CpFull = self.CpPosition
            else:
                self.CpFull = np.add(self.CpFull, self.CpPosition)

            if self.export is not None:
                self.CpFull.tofile(self.export + 'CpFull.bin')
                self.CpPosition.tofile(self.export + 'CpPosition.bin')

            if self.verbose:
                print('---------------------------------')
                print('---------------------------------')
                print('CpPosition successfully calculated')

        # For a multi-segmented fault
        else:
            Kpos = []

            for f in range(len(filenames)):
                # Read GFs
                gfs = []
                for i in range(len(filenames[f])):
                    gf = np.fromfile(os.path.join(self.gfdir, filenames[f][i]))
                    gf = np.reshape(gf, (len(gf) // self.Np, self.Np))
                    gfs.append(gf)

                # Calculate the sensitivity Kernels of the Green's Functions (the derivatives) by linearizing their variation
                slope = np.empty(gfs[0].shape)
                rvalue = np.empty(gfs[0].shape)
                pvalue = np.empty(gfs[0].shape)
                stderr = np.empty(gfs[0].shape)
                inter = np.empty(gfs[0].shape)
                coeff = []
                for p in position_range:
                    if p < 0:
                        position_vals = [p, 0]
                    elif p == 0:
                        continue
                    else:
                        position_vals = [0, p]
                    for i in range(gfs[0].shape[0]):
                        for j in range(gfs[0].shape[1]):
                            # Do a linear regression for each couple parameter/data
                            slope[i, j], inter[i, j], rvalue[i, j], pvalue[
                                i, j], stderr[i, j] = linregress(
                                    position_vals, [
                                        gfs[k][i, j] for k in [
                                            position_range.index(
                                                position_vals[0]),
                                            position_range.index(
                                                position_vals[1])
                                        ]
                                    ])
                    coeff.append(slope.copy())  # copy: slope is overwritten on the next pass

                # Select the maximum coefficient
                Kpos.append(np.max(coeff, axis=0))

            # Build the Covariance matrix
            kernels = np.asarray(Kpos)
            k = np.transpose(np.matmul(kernels, mprior))
            Covpos = np.zeros((len(self.faults), len(self.faults)))
            for f in range(len(self.faults)):
                Covpos[f, f] = sigma[f]**2
            C1 = np.matmul(k, Covpos)
            CpPosition = np.matmul(C1, np.transpose(k))

            self.KPosition = kernels
            self.CovPosition = Covpos
            if self.KernelsFull == []:
                self.KernelsFull = self.KPosition
            else:
                self.KernelsFull = np.concatenate(
                    (self.KernelsFull, self.KPosition))
            if self.CovFull == []:
                self.CovFull = self.CovPosition
            else:
                Z = np.zeros(
                    (np.shape(self.CovFull)[0], np.shape(self.CovPosition)[0]),
                    dtype=int)
                self.CovFull = np.asarray(
                    np.bmat([[self.CovFull, Z], [Z.T, self.CovPosition]]))

            self.CpPosition = CpPosition
            if self.CpFull == []:
                self.CpFull = self.CpPosition
            else:
                self.CpFull = np.add(self.CpFull, self.CpPosition)

            if self.export is not None:
                self.CpFull.tofile(self.export + 'CpFull.bin')
                self.CpPosition.tofile(self.export + 'CpPosition.bin')

            if self.verbose:
                print('---------------------------------')
                print('---------------------------------')
                print('CpPosition successfully calculated')

        return self.CpPosition

    # ----------------------------------------------------------------------


#EOF
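
The CovFull update above grows a block-diagonal covariance one block at a time. A small sketch of the pattern, with made-up 1x1 and 2x2 blocks:

import numpy as np

CovFull = np.array([[4.0]])                       # e.g. an existing sigma**2 block
CovPosition = np.array([[1.0, 0.0], [0.0, 2.0]])  # hypothetical new block
Z = np.zeros((CovFull.shape[0], CovPosition.shape[0]))
CovFull = np.asarray(np.bmat([[CovFull, Z], [Z.T, CovPosition]]))
# -> 3x3 block-diagonal matrix; the blocks stay uncoupled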
Example #59
0
    def calibrate_mag(self, req):
        if len(self.mag_samples) >= 10:
            self.sampling = False
            xyz = matrix(self.mag_samples)
            rospy.loginfo('Starting magnetometer calibration with %d samples' %
                          (len(self.mag_samples)))

            #compute the vectors [ x^2 y^2 z^2 2*x*y 2*y*z 2*x*z x y z 1] for every sample
            # the result for the x*y y*z and x*z components should be divided by 2
            xyz2 = power(xyz, 2)
            xy = multiply(xyz[:, 0], xyz[:, 1])
            xz = multiply(xyz[:, 0], xyz[:, 2])
            yz = multiply(xyz[:, 1], xyz[:, 2])

            # build the data matrix
            A = bmat('xyz2 xy xz yz xyz')

            b = 1.0 * ones((xyz.shape[0], 1))

            # solve the system Ax = b
            q, res, rank, sing = linalg.lstsq(A, b)

            # build scaled ellipsoid quadric matrix (in homogeneous coordinates)
            A = matrix([[q[0][0], 0.5 * q[3][0], 0.5 * q[4][0], 0.5 * q[6][0]],
                        [0.5 * q[3][0], q[1][0], 0.5 * q[5][0], 0.5 * q[7][0]],
                        [0.5 * q[4][0], 0.5 * q[5][0], q[2][0], 0.5 * q[8][0]],
                        [0.5 * q[6][0], 0.5 * q[7][0], 0.5 * q[8][0], -1]])

            # build scaled ellipsoid quadric matrix (in regular coordinates)
            Q = matrix([[q[0][0], 0.5 * q[3][0], 0.5 * q[4][0]],
                        [0.5 * q[3][0], q[1][0], 0.5 * q[5][0]],
                        [0.5 * q[4][0], 0.5 * q[5][0], q[2][0]]])

            # obtain the centroid of the ellipsoid
            x0 = linalg.inv(-1.0 * Q) * matrix(
                [0.5 * q[6][0], 0.5 * q[7][0], 0.5 * q[8][0]]).T

            # translate the ellipsoid in homogeneous coordinates to the center
            T_x0 = matrix(eye(4))
            T_x0[0, 3] = x0[0]
            T_x0[1, 3] = x0[1]
            T_x0[2, 3] = x0[2]
            A = T_x0.T * A * T_x0

            # rescale the ellipsoid quadric matrix (in regular coordinates)
            Q = Q * (-1.0 / A[3, 3])

            # take the cholesky decomposition of Q. this will be the matrix to transform
            # points from the ellipsoid to a sphere, after correcting for the offset x0
            L = eye(3)
            try:
                L = linalg.cholesky(Q).transpose()
            except Exception as e:
                rospy.loginfo(str(e))
                L = eye(3)

            rospy.loginfo("Magnetometer offset:\n %s", x0)
            rospy.loginfo("Magnetometer Calibration Matrix:\n %s", L)

            file_path = os.path.join(self.__location__, "last_mag_calibration")
            f = open(file_path, "w+")
            pickle.dump(x0, f)
            pickle.dump(L, f)
            pickle.dump(matrix(self.mag_samples), f)
            f.close()
            # back up calibration
            calib_name = strftime("mag_calibration_%d_%m_%y_%H_%M_%S",
                                  localtime())
            calib_path = os.path.join(self.__location__, calib_name)
            os.system("cp %s %s" % (file_path, calib_path))

            self.mag_matrix = L
            self.mag_offset = squeeze(array(x0))
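
Applying the resulting calibration to a raw reading is then a subtract-and-transform; a minimal sketch with made-up x0 and L (the real ones come from the ellipsoid fit above):

import numpy as np

x0 = np.array([0.1, -0.2, 0.05])  # hypothetical hard-iron offset from the fit
L = np.eye(3)                     # hypothetical soft-iron correction matrix
raw = np.array([0.4, 0.1, 0.3])   # one raw magnetometer sample
corrected = L @ (raw - x0)        # maps the ellipsoid of raw readings onto a sphere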
Example #60
0
def sdr_ip(y,H):
    epsilon = 1e-5
    K=np.shape(H)[1]
    Q=np.array(np.bmat([[np.dot(H.T,H),-np.dot(H.T,y)],[-np.dot(H.T,y).T,np.dot(y.T,y)]]))
    Q = -Q
    n = np.shape(Q)[1]
    X = np.identity(n)
    lambd = 1.1*np.dot(np.absolute(Q),np.ones((n,1)))
    Z = np.diag(np.reshape(lambd,n)) - Q

    k = 0

    while(np.trace(np.dot(Z,X)) > epsilon):
        mu = np.trace(np.dot(Z,X))/n
        mu = mu/2

        #compute newton search direction
        W = np.linalg.inv(Z)
        T = np.multiply(W,X)

        dlambd = np.linalg.solve(T, np.reshape(mu*np.diag(W) - np.ones(n) ,(n,1)))
        dZ = np.diag(np.reshape(dlambd, n))
        dX = mu*W - X - np.dot(W,np.dot(dZ,X))
        dX = 0.5*(dX + dX.T)

        #line search
        ap = 0.9
        ad = 0.9
        tau = 0.99

        j = 1
        try:
            R = np.linalg.cholesky(X + ap*dX) # should be matrix!!!!
            s = 0
        except np.linalg.linalg.LinAlgError:
            s = 1
        while(s==1):
            j += 1
            ap = tau * ( ap ** j )
            try:
                R = np.linalg.cholesky(X + ap*dX)
                s = 0
            except np.linalg.linalg.LinAlgError:
                s = 1
        X = X + ap*dX

        j = 1
        try:
            R = np.linalg.cholesky(Z + ad*dZ)
            s = 0
        except np.linalg.linalg.LinAlgError:
            s = 1
        while(s==1):
            j += 1
            ad = tau * ( ad ** j )
            try:
                R = np.linalg.cholesky(Z + ad*dZ)
                s = 0
            except np.linalg.linalg.LinAlgError:
                s = 1
        Z = Z + ad*dZ
        lambd = lambd + ad*dlambd
        x = np.sign(X[range(K), -1]).T
    return (x)
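
A quick usage sketch for sdr_ip, assuming the detection model y = H x + noise with x in {-1, +1}^K; H, x_true and the noise level are made up for illustration:

import numpy as np

rng = np.random.default_rng(1)
K = 4
H = rng.normal(size=(8, K))
x_true = np.sign(rng.normal(size=(K, 1)))
y = H @ x_true + 0.01 * rng.normal(size=(8, 1))
x_hat = sdr_ip(y, H)  # note: the relaxation recovers x only up to a global sign
print(x_hat, x_true.ravel())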