Example #1
0
    def __init__(self, pos, neg, k=5):
        self.k = k
        pos = pos.T
        neg = neg.T

        totals = pos.sum(axis=1)
        popular = numpy.argsort(totals, axis=0)[::-1, :][1 : 1 + k, :]
        popular = numpy.array(popular.T)[0]
        self.popular = popular

        pos = pos[popular, :].todense()
        neg = neg[popular, :].todense()

        self.posmu = pos.mean(axis=1)
        self.negmu = neg.mean(axis=1)

        p = pos - self.posmu
        n = neg - self.negmu

        self.pcov = p * p.T / p.shape[1]
        self.ncov = n * n.T / n.shape[1]

        self.pdet = dlinalg.det(self.pcov)
        self.ndet = dlinalg.det(self.ncov)

        assert self.pdet != 0
        assert self.ndet != 0

        self.picov = dlinalg.inv(self.pcov)
        self.nicov = dlinalg.inv(self.ncov)
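A hedged sketch (not part of the original class) of how these per-class statistics are typically consumed downstream: a Gaussian log-density built from a stored mean, inverse covariance and determinant, mirroring the posmu/pcov/pdet/picov and negmu/ncov/ndet/nicov pairs above. `dlinalg` is assumed to be scipy.linalg.

import numpy as np

def gaussian_loglik(x, mu, icov, det_cov):
    # log N(x; mu, cov), given a precomputed inverse covariance and determinant
    d = np.asarray(x - mu).ravel()
    icov = np.asarray(icov)
    k = d.size
    return -0.5 * (k * np.log(2 * np.pi) + np.log(det_cov) + d.dot(icov).dot(d))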
Example #2
0
def lnmixgaussian(x, params):
    """
    NAME:
       lnmixgaussian
    PURPOSE:
       returns the log of a mixture of two two-dimensional Gaussians
    INPUT:
       x - 2D point to evaluate the Gaussian at
       params - means and inverse covariances ([mean_array, inverse covariance matrix, mean_array, inverse covariance matrix, amp1])
    OUTPUT:
       log N(mean,var)
    HISTORY:
       2009-10-30 - Written - Bovy (NYU)
    """
    return sc.log(
        params[4]
        / 2.0
        / sc.pi
        * sc.sqrt(linalg.det(params[1]))
        * sc.exp(-0.5 * sc.dot(x - params[0], sc.dot(params[1], x - params[0])))
        + (1.0 - params[4])
        / 2.0
        / sc.pi
        * sc.sqrt(linalg.det(params[3]))
        * sc.exp(-0.5 * sc.dot(x - params[2], sc.dot(params[3], x - params[2])))
    )
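A minimal usage sketch (not from the original project), assuming `sc` is numpy and `linalg` is scipy.linalg as used in the snippet; the means, inverse covariances and mixture weight are made-up values:

import numpy as sc
from scipy import linalg

mean1, mean2 = sc.array([0.0, 0.0]), sc.array([3.0, 3.0])
invvar1 = linalg.inv(sc.array([[1.0, 0.2], [0.2, 1.0]]))  # inverse covariance 1
invvar2 = linalg.inv(sc.array([[2.0, 0.0], [0.0, 0.5]]))  # inverse covariance 2
params = [mean1, invvar1, mean2, invvar2, 0.7]            # amp1 = 0.7
print(lnmixgaussian(sc.array([0.5, -0.5]), params))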
Example #3
0
File: sphere.py Project: jstraub/js
def testQuaternion():
  dtheta = 30.0
  quat = Quaternion()
  print quat.q
  print quat.toRot()
  print det(quat.toRot())

  figm = mlab.figure(bgcolor=(1,1,1))
  for i in range(100):
    print quat.sampleUnif(0.5*np.pi)
    k,theta = quat.toAxisAngle()
    print theta*180.0/np.pi
    plotCosy(figm, quat.toRot())

  figm = mlab.figure(bgcolor=(1,1,1))
  for i in range(100):
    print quat.sample(dtheta)
    k,theta = quat.toAxisAngle()
    print theta*180.0/np.pi
    plotCosy(figm, quat.toRot())

  figm1 = mlab.figure(bgcolor=(1,1,0.0))
  for i in range(100):
    # sample rotation axis
    k = np.random.rand(3)-0.5
    # sample uniformly from +- 5 degrees
    theta = (np.asscalar(np.random.rand(1)) + dtheta - 0.5) * np.pi / 180.0 # (np.asscalar(np.random.rand(1))-0.5)*np.pi/(180.0/(2.0*dtheta))
    print 'perturbation: {} theta={}'.format(k/norm(k),theta*180.0/np.pi)
    dR = RodriguesRotation(k/norm(k),theta)
    plotCosy(figm1, dR)
    
  mlab.show()
Example #4
0
   def LL(self,h,X=None,stack=True,REML=False):

      """
	 Computes the log-likelihood for a given heritability (h).  If X==None, then the 
	 default X0t will be used.  If X is set and stack=True, then X0t will be matrix concatenated with
	 the input X.  If stack is false, then X is used in place of X0t in the LL calculation.
	 REML is computed by adding additional terms to the standard LL and can be computed by setting REML=True.
      """

      if X is None: X = self.X0t
      elif stack:
         self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
         X = self.X0t_stack

      n = float(self.N)
      q = float(X.shape[1])
      beta,sigma,Q,XX_i,XX = self.getMLSoln(h,X)
      LL = n*np.log(2*np.pi) + np.log(h*self.Kva + (1.0-h)).sum() + n + n*np.log(1.0/n * Q)
      LL = -0.5 * LL

      if REML:
         LL_REML_part = q*np.log(2.0*np.pi*sigma) + np.log(det(matrixMult(X.T,X))) - np.log(det(XX))
         LL = LL + 0.5*LL_REML_part


      LL = LL.sum()
      return LL,beta,sigma,XX_i
Example #5
0
	def __init__(self,n, dimz = 2, dimx = 3):
		
		self.n = n
		self.W = RS.normal(0,1, size = (dimx,dimz))
		self.sigx = 0.000000000001  # RS.normal(0,1)
		self.dimz = dimz
		self.dimx = dimx
		
		data = util.generate_data(n, self.W, self.sigx, dimx, dimz)
		self.observed = data[0]
		self.latent = data[1]
		
		self.prec = (1/self.sigx)*np.dot(self.W.transpose(), self.W)
		self.cov = np.linalg.inv(self.prec)
		
		'''
		values for normalisation computation-- messy!
		'''
		temp1 = (2*np.pi)**(dimz/2.0)*np.sqrt(det(self.cov))
		temp2 = det(2*np.pi*self.sigx*np.identity(dimz))
		self.pc_norm1 = temp1/temp2
		temp3 = np.linalg.inv(np.dot(self.W.transpose(), self.W))
		self.wtwinv = temp3
		temp3 = np.dot(self.W, temp3)
		self.pc_norm2 = np.dot(temp3, self.W.transpose())
Example #6
0
def find_best_rotation(q1, q2):
    """
    This function calculates the best rotation between two srvfs using
    Procrustes rigid alignment

    :param q1: numpy ndarray of shape (2,M) of M samples
    :param q2: numpy ndarray of shape (2,M) of M samples

    :rtype: numpy ndarray
    :return q2new: optimal rotated q2 to q1
    :return R: rotation matrix

    """
    eps = finfo(double).eps
    n, T = q1.shape
    A = q1.dot(q2.T)
    U, s, V = svd(A)

    if (abs(det(U) * det(V) - 1) < 10 * eps):
        S = eye(n)
    else:
        S = eye(n)
        S[:, -1] = -S[:, -1]

    R = U.dot(S).dot(V.T)
    q2new = R.dot(q2)

    return (q2new, R)
Example #7
0
	def __init__(self,n, dimz = 2, dimx = 3):
		
		self.n = n
		self.W = RS.normal(0,1, size = (dimx,dimz))
		self.sigx = 0.000000000001  # RS.normal(0,1)
		self.dimz = dimz
		self.dimx = dimx
		
		data = self.generate_data(n)
		self.observed = data[0]
		#we keep this to test
		self.latent = data[1]
		
		self.prec = (1/self.sigx)*np.dot(self.W.transpose(), self.W)
		self.cov = np.linalg.inv(self.prec)
		
		'''
		values for normalisation computation
		'''
		temp1 = (2*np.pi)**(dimz/2.0)*np.sqrt(det(self.cov))
		temp2 = det(2*np.pi*self.sigx*np.identity(dimz))
		self.pc_norm1 = temp1/temp2
		temp3 = np.linalg.inv(np.dot(self.W.transpose(), self.W))
		self.wtwinv = temp3
		temp3 = np.dot(self.W, temp3)
		self.pc_norm2 = np.dot(temp3, self.W.transpose())
		
		'''
		prior for n factors will be product of n priors~ N(0,I)
		'''
		#I = np.identity(dimz)
		#product_priors = MVN(0, n*I)
Example #8
0
def matrix_normal_density(X, M, U, V):
    """Log-density of a matrix normal distribution MN(M, U, V)"""
    n, p = X.shape
    # log normaliser: -(n*p/2)*log(2*pi) - (p/2)*log|U| - (n/2)*log|V|
    norm = -0.5*(n*p*np.log(2*np.pi) + p*np.log(la.det(U)) + n*np.log(la.det(V)))
    XM = X - M
    pptn = -0.5*np.trace(np.dot(la.solve(U, XM), la.solve(V, XM.T)))
    logpdf = norm + pptn
    return logpdf
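A quick sanity check (assumed setup: `np` is numpy, `la` is numpy.linalg): the matrix-normal log-density above should agree with a multivariate normal over the column-stacked vec(X), whose covariance is kron(V, U).

import numpy as np
import numpy.linalg as la
from scipy.stats import multivariate_normal

n, p = 3, 2
U = np.eye(n) + 0.1 * np.ones((n, n))   # row covariance (SPD)
V = np.array([[2.0, 0.3], [0.3, 1.0]])  # column covariance (SPD)
M = np.zeros((n, p))
X = np.random.randn(n, p)

lp1 = matrix_normal_density(X, M, U, V)
# column-stacked vec(X) is X.T.ravel(), with covariance kron(V, U)
lp2 = multivariate_normal.logpdf(X.T.ravel(), M.T.ravel(), np.kron(V, U))
print(np.allclose(lp1, lp2))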
Example #9
0
 def from_matrix44(self, aff):
     """
     Convert a 4x4 matrix describing an affine transform into a
     12-sized vector of natural affine parameters: translation,
     rotation, log-scale, pre-rotation (to allow for shearing when
     combined with non-unitary scales). In case the transform has a
     negative determinant, set the `_direct` attribute to False.
     """
     vec12 = np.zeros((12,))
     vec12[0:3] = aff[:3, 3]
     # Use SVD to find orthogonal and diagonal matrices such that
     # aff[0:3,0:3] == R*S*Q
     R, s, Q = spl.svd(aff[0:3, 0:3]) 
     if spl.det(R) < 0:
         R = -R
         Q = -Q
     r = rotation_mat2vec(R)
     if spl.det(Q) < 0:
         Q = -Q
         self._direct = False
     q = rotation_mat2vec(Q)
     vec12[3:6] = r
     vec12[6:9] = np.log(np.maximum(s, TINY))
     vec12[9:12] = q
     self._vec12 = vec12
Example #10
0
    def _update_precisions(self):
        """Update the variational distributions for the precisions"""
        if self.cvtype == 'spherical':
            self._a = 0.5 * self.n_features * np.sum(self._z, axis=0)
            for k in xrange(self.n_components):
                # XXX: how to avoid this huge temporary matrix in memory
                dif = (self._X - self._means[k])
                self._b[k] = 1.
                d = np.sum(dif * dif, axis=1)
                self._b[k] += 0.5 * np.sum(
                    self._z.T[k] * (d + self.n_features))
                self._bound_prec[k] = (
                    0.5 * self.n_features * (
                        digamma(self._a[k]) - np.log(self._b[k])))
            self._precs = self._a / self._b

        elif self.cvtype == 'diag':
            for k in xrange(self.n_components):
                self._a[k].fill(1. + 0.5 * np.sum(self._z.T[k], axis=0))
                ddif = (self._X - self._means[k])  # see comment above
                for d in xrange(self.n_features):
                    self._b[k, d] = 1.
                    dd = ddif.T[d] * ddif.T[d]
                    self._b[k, d] += 0.5 * np.sum(self._z.T[k] * (dd + 1))
                self._precs[k] = self._a[k] / self._b[k]
                self._bound_prec[k] = 0.5 * np.sum(digamma(self._a[k])
                                                    - np.log(self._b[k]))
                self._bound_prec[k] -= 0.5 * np.sum(self._precs[k])

        elif self.cvtype == 'tied':
            self._a = 2 + self._X.shape[0] + self.n_features
            self._B = (self._X.shape[0] + 1) * np.identity(self.n_features)
            for i in xrange(self._X.shape[0]):
                for k in xrange(self.n_components):
                    dif = self._X[i] - self._means[k]
                    self._B += self._z[i, k] * np.dot(dif.reshape((-1, 1)),
                                                      dif.reshape((1, -1)))
            self._B = linalg.pinv(self._B)
            self._precs = self._a * self._B
            self._detB = linalg.det(self._B)
            self._bound_prec = 0.5 * detlog_wishart(
                self._a, self._B, self._detB, self.n_features)
            self._bound_prec -= 0.5 * self._a * np.trace(self._B)

        elif self.cvtype == 'full':
            for k in xrange(self.n_components):
                T = np.sum(self._z.T[k])
                self._a[k] = 2 + T + self.n_features
                self._B[k] = (T + 1) * np.identity(self.n_features)
                dx = self._X - self._means[k]
                self._B[k] += np.dot((self._z[:, k] * dx.T), dx)
                self._B[k] = linalg.inv(self._B[k])
                self._precs[k] = self._a[k] * self._B[k]
                self._detB[k] = linalg.det(self._B[k])
                self._bound_prec[k] = 0.5 * detlog_wishart(self._a[k],
                                                           self._B[k],
                                                           self._detB[k],
                                                           self.n_features)
                self._bound_prec[k] -= 0.5 * self._a[k] * np.trace(self._B[k])
Example #11
0
def KL_normal(m1, sigma1, m2, sigma2):
    """
    Calculates the KL divergence between two normal distributions specified by
    N(``m1``, ``sigma1``), N(``m2``, ``sigma2``)
    """

    return 1. / 2. * (math.log(det(sigma2) / det(sigma1)) - len(m1) + trace(mdot(inv(sigma2), sigma1)) + \
    mdot((m2 - m1).T, inv(sigma2) , m2- m1))
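A quick sanity check (assumed setup): KL(N0 || N0) should be 0. The names `trace`, `inv`, `det` are assumed to come from numpy / numpy.linalg, and `mdot` is a hypothetical chained-matrix-product helper.

import math
import numpy as np
from numpy import trace
from numpy.linalg import inv, det
from functools import reduce

def mdot(*args):  # hypothetical helper: chained matrix product
    return reduce(np.dot, args)

m = np.zeros(2)
sigma = np.array([[1.0, 0.3], [0.3, 2.0]])
print(KL_normal(m, sigma, m, sigma))  # -> 0.0 (up to floating point)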
Example #12
0
def Matusita_kernel(cov_1, cov_2):
    
    p = np.shape(cov_1)[0]    
    
    det_1 = la.det(cov_1)
    det_2 = la.det(cov_2)
    det_sum = la.det(cov_1 + cov_2)
    return ((2 ** (p/2.0)) * (det_1 ** 0.25) * (det_2 ** 0.25))/(det_sum ** 0.5)
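A sanity check (assuming `np` is numpy and `la` is numpy.linalg or scipy.linalg): the Matusita affinity of a covariance with itself is exactly 1.

import numpy as np
import numpy.linalg as la

cov = np.array([[2.0, 0.5], [0.5, 1.0]])
print(Matusita_kernel(cov, cov))  # -> 1.0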
Example #13
0
def _ar_model_select(R, m, ne, p_range):
    """model order selection

    :Parameters:
        R : ndarray
            upper triangular mx from QR
        m : int
            state vector dimension
        ne : int
            number of block equations of size m used in the estimation
        p_range : list
            list of model orders to select from
    """

    # inits
    p_max = max(p_range)
    p_len = len(p_range)

    sbc = N.zeros(p_len)
    fpe = N.zeros(p_len)
    ldp = N.zeros(p_len)

    np = N.zeros(p_len)
    np[-1] = m * p_max

    # get lower right triangle of R
    #
    #     | R11  R12 |
    # R = |          |
    #     |  0   R22 |
    #
    R22 = R[np[-1]:np[-1] + m, :][:, np[-1]:np[-1] + m]
    invR22 = NL.inv(R22)
    Mp = N.dot(invR22, invR22.T)

    # model selection
    ldp[-1] = 2.0 * N.log(NL.det(R22))
    for i in reversed(xrange(p_len)):
        np[i] = m * p_range[i]
        if p_range[i] < p_max:

            # downdated part of R
            Rp = R[np[i]:np[i] + m, :][:, np[-1]:np[-1] + m]

            # Woodbury formula
            L = NL.cholesky(N.eye(m) + N.dot(N.dot(Rp, Mp), Rp.T), lower=True)
            Np = N.dot(N.dot(NL.inv(L), Rp), Mp)
            Mp = Mp - N.dot(Np.T, Np)

            ldp[i] = ldp[i + 1] + 2.0 * N.log(NL.det(L))

        # selector metrics
        sbc[i] = ldp[i] / m - N.log(ne) * (ne - np[i]) / ne
        fpe[i] = ldp[i] / m - N.log(ne * (ne - np[i]) / (ne + np[i]))

    # return
    return sbc, fpe, ldp, np
Example #14
0
def main1():
    N = 4       # number of time slices
    n = 1
    r = 1
    p = 1
    m = 1
    s = 2

    q = 1      # number of design points

    solver = IMFSolver(n=n, r=r, p=p, m=m, s=s, N=N)

    theta = [1.0, 1.0]

    solver.set_Phi([[theta[0]]])
    solver.set_diff_Phi_theta([[1.0]], 0)
    solver.set_diff_Phi_theta([[0.0]], 1)

    solver.set_Psi([[theta[1]]])
    solver.set_diff_Psi_theta([[0.0]], 0)
    solver.set_diff_Psi_theta([[1.0]], 1)

    solver.set_Gamma([[1.0]])
    solver.set_diff_Gamma_theta([[0.0]], 0)
    solver.set_diff_Gamma_theta([[0.0]], 1)

    solver.set_H([[1.0]])
    solver.set_diff_H_theta([[0.0]], 0)
    solver.set_diff_H_theta([[0.0]], 1)

    solver.set_Q([[0.1]])
    solver.set_diff_Q_theta([[0.0]], 0)
    solver.set_diff_Q_theta([[0.0]], 1)

    solver.set_R([[0.3]])
    solver.set_diff_R_theta([[0.0]], 0)
    solver.set_diff_R_theta([[0.0]], 1)

    solver.set_x0([[0.0]])
    solver.set_diff_x0_theta([[0.0]], 0)
    solver.set_diff_x0_theta([[0.0]], 1)

    solver.set_P0([[0.1]])
    solver.set_diff_P0_theta([[0.0]], 0)
    solver.set_diff_P0_theta([[0.0]], 1)

    solver.set_u([[1.0]], 0)
    solver.set_u([[1.0]], 1)
    solver.set_u([[1.0]], 2)
    solver.set_u([[2.0]], 3)

    M = solver.get_inf_matrix()
    print "M"
    print M
    print "la.det(M) = ", la.det(M)
    print "-np.log(la.det(M)) = ", -np.log(la.det(M))
Example #15
0
def give_me_an_array(A, B, C):
    output = np.zeros((2,2))
### START YOUR CODE HERE ###
    output[0,0] = linalg.det(A+B.dot(linalg.inv(C)))
    output[0,1] = linalg.det(B-C.dot(linalg.inv(A)))
    output[1,0] = linalg.det(B-A.dot(linalg.inv(C)))
    output[1,1] = linalg.det(A+C.dot(linalg.inv(B)))

#### END YOUR CODE HERE ####
    return output
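A possible call (hypothetical data; A, B and C just need to be square, invertible and conformable):

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A, B, C = (np.eye(2) + 0.1 * rng.standard_normal((2, 2)) for _ in range(3))
print(give_me_an_array(A, B, C))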
Example #16
0
    def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in xrange(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in xrange(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                    - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in xrange(self.n_components):
                    diff = X - self.means_[k]
                    self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = linalg.pinv(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in xrange(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = linalg.pinv(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(self.dof_[k],
                                                            self.scale_[k],
                                                            self.det_scale_[k],
                                                            n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])
Example #17
0
def INsc1(A):
    X = np.copy(A)
    E = 0.5*(np.eye(A.shape[0])-A)
    dA = spl.det(A)**(0.5)
    for i in range (1,5):
        print i
        detX =  spl.det(X)
        uk = np.abs(detX/dA)**(-1.0/i)
        Etk = (E + (0.5*X))/uk - (0.5)*uk*X
        X = uk*X + Etk
        print X
        E = (-0.5)*(Etk.dot(spl.inv(X))).dot(Etk)
    return X
Example #18
0
def NWsc(M, object=False):
    X = np.copy(M)
    dM = spl.det(M)**(1.0/2)
    ras = result()
    for i in range(1,31):
        print i
        uk = np.abs(spl.det(X)/dM)**(-1.0/i)
        X = (0.5)*(uk*X + (uk**(-1))*spl.inv(X).dot(M))
        print spl.norm(X.dot(X)-M)/spl.norm(M)
        ras.res.append((spl.norm(X.dot(X)-M)/spl.norm(M)))
    ras.iter = i
    ras.ris = X
    return (ras if object else X)
Example #19
0
    def _produceSamples(self):
        """ Append batchsize new samples and evaluate them. """
        if self.numLearningSteps == 0 or not self.importanceMixing:
            for _ in range(self.batchSize):
                self._produceNewSample()
            self.allGenerated.append(self.batchSize + self.allGenerated[-1])
        else:
            olds = len(self.allSamples)
            oldDetFactorSigma = det(self.allFactorSigmas[-2])
            newDetFactorSigma = det(self.factorSigma)
            invA = inv(self.factorSigma)
    
            # All pdfs computed here are off by a coefficient of 1/power(2.0*pi, self.numDistrParams/2.)
            # but as only their relative values matter, we ignore it.
            
            # stochastically reuse old samples, according to the change in distribution
            for s in range(olds - self.batchSize, olds):
                oldPdf = exp(-0.5 * dot(self.allPs[s], self.allPs[s])) / oldDetFactorSigma
                sample = self.allSamples[s]
                newPs = dot(invA.T, (sample - self.x))
                newPdf = exp(-0.5 * dot(newPs, newPs)) / newDetFactorSigma
                r = rand()
                if r < (1 - self.forcedRefresh) * newPdf / oldPdf:
                    self.allSamples.append(sample)
                    self.allFitnesses.append(self.allFitnesses[s])
                    self.allPs.append(newPs)
                # never use only old samples
                if (olds + self.batchSize) - len(self.allSamples) < self.batchSize * self.forcedRefresh:
                    break
            self.allGenerated.append(self.batchSize - (len(self.allSamples) - olds) + self.allGenerated[-1])

            # add the remaining ones
            oldInvA = inv(self.allFactorSigmas[-2])
            while  len(self.allSamples) < olds + self.batchSize:
                r = rand()
                if r < self.forcedRefresh:
                    self._produceNewSample()
                else:
                    while True:
                        p = randn(self.numParameters)
                        newPdf = exp(-0.5 * dot(p, p)) / newDetFactorSigma
                        sample = dot(self.factorSigma.T, p) + self.x
                        sample = array(map(float,sample[:lt.Dsize-1])+map(degree_bound,sample[lt.Dsize-1:]))
                        if useNearest:
                            sample = array(nearestPoint(sample[:lt.Dsize-1])+map(float,sample[lt.Dsize-1:]))
                        if validDist(sample):
                            break
                    oldPs = dot(oldInvA.T, (sample - self.allCenters[-2]))
                    oldPdf = exp(-0.5 * dot(oldPs, oldPs)) / oldDetFactorSigma
                    if r < 1 - oldPdf / newPdf:
                        self._produceNewSample(sample, p)
Example #20
0
def NWsc(M, object=False):
    W = np.array(MPDBsc((mp.matrix(M))).tolist(),dtype=np.float64)
    X = np.copy(M)
    dM = spl.det(M)**(1.0/2)
    ras = result()
    for i in range(1,31):
        uk = np.abs(spl.det(X)/dM)**(-1.0/i)
        X = (0.5)*(uk*X + (uk**(-1))*spl.inv(X).dot(M))
        print (spl.norm(X-W)/spl.norm(W))
        ras.res.append((spl.norm(X-W)/spl.norm(W)))
    ras.iter = i
    ras.ris = X

    return (ras if object else X)
Example #21
0
   def _getBetaT(self,XStar,Ap,D,Yt):
      P = []
      for i in range(self.M): P += (self.Kva + D[i]).tolist()
      P = np.array(P)
      L = np.kron(Ap,XStar)
      A = L.T * 1.0/(P+1.0)
      B = np.dot(A,L)
      Bi = linalg.inv(B)
      beta = np.dot(np.dot(Bi,A),Yt)

      _REML_part = np.log(linalg.det(np.dot(L.T,L))) + np.log(linalg.det(B))

      mu = np.dot(L,beta)
      return beta,mu,Bi,_REML_part
Example #22
0
def main():
    N = 20

    solver = IMFSolver(n=2, r=1, p=2, m=1, s=2, N=N)

    theta = [0.56, 0.48]

    solver.set_Phi([[1.0, 1.0], [-0.5, 0.0]])
    solver.set_diff_Phi_theta([[0.0, 0.0], [0.0, 0.0]], 0)
    solver.set_diff_Phi_theta([[0.0, 0.0], [0.0, 0.0]], 1)

    solver.set_Psi([[theta[0]], [theta[1]]])
    solver.set_diff_Psi_theta([[1.0], [0.0]], 0)
    solver.set_diff_Psi_theta([[0.0], [1.0]], 1)

    solver.set_Gamma([[1.0, 0.0], [0.0, 1.0]])
    solver.set_diff_Gamma_theta([[0.0, 0.0], [0.0, 0.0]], 0)
    solver.set_diff_Gamma_theta([[0.0, 0.0], [0.0, 0.0]], 1)

    solver.set_H([[1.0, 0.0]])
    solver.set_diff_H_theta([[0.0, 0.0]], 0)
    solver.set_diff_H_theta([[0.0, 0.0]], 1)

    solver.set_Q([[0.07, 0.0], [0.0, 0.07]])
    solver.set_diff_Q_theta([[0.0, 0.0], [0.0, 0.0]], 0)
    solver.set_diff_Q_theta([[0.0, 0.0], [0.0, 0.0]], 1)

    solver.set_R([[0.02]])
    solver.set_diff_R_theta([[0.0]], 0)
    solver.set_diff_R_theta([[0.0]], 1)

    solver.set_x0([[0.0], [0.0]])
    solver.set_diff_x0_theta([[0.0], [0.0]], 0)
    solver.set_diff_x0_theta([[0.0], [0.0]], 1)

    solver.set_P0([[0.1, 0.0], [0.0, 0.1]])
    solver.set_diff_P0_theta([[0.0, 0.0], [0.0, 0.0]], 0)
    solver.set_diff_P0_theta([[0.0, 0.0], [0.0, 0.0]], 1)

    for i in xrange(N):
        solver.set_u([[1.0]], i)

    M = solver.get_inf_matrix()
    print M
    print la.det(M)
    print -np.log(la.det(M))

    dM = solver.get_diff_inf_matrix_u(0, 10)
    print dM
    print np.trace(np.dot(la.inv(M), dM))
Example #23
0
def _debug_with_bound (itr, var_value, var_name, data, K, topicMean, topicCov, outDocCov, inDocCov, vocab, dtype, outMeans, outVarcs, inMeans, inVarcs, A, n):
    if np.isnan(var_value).any():
        printStderr ("WARNING: " + var_name + " contains NaNs")
    if np.isinf(var_value).any():
        printStderr ("WARNING: " + var_name + " contains INFs")
    if "dtype" in dir(var_value) and var_value.dtype != dtype:
        printStderr ("WARNING: dtype(" + var_name + ") = " + str(var_value.dtype))

    model = ModelState(K, topicMean, topicCov, outDocCov, vocab, A, False, dtype, MODEL_NAME)
    query = QueryState(outMeans, outVarcs, inMeans, inVarcs, inDocCov, n)

    old_bound = _debug_with_bound.old_bound
    bound     = var_bound(data, model, query)
    diff = "" if old_bound == 0 else "%15.4f" % (bound - old_bound)
    _debug_with_bound.old_bound = bound
    
    addendum = ""
    if var_name == "topicCov":
        try:
            addendum = "log det(topicCov) = %g" % (np.log(la.det(topicCov)))
        except:
            addendum = "log det(topicCov) = <undefined>"
    
    if isnan(bound):
        printStderr ("Bound is NaN")
    else:
        perp = perplexity_from_like(log_likelihood(data, model, query), data.word_count)
        if int(bound - old_bound) < 0:
            printStderr ("Iter %3d Update %-15s Bound %22f (%15s) (%5.0f)     %s" % (itr, var_name, bound, diff, perp, addendum))
        else:
            print ("Iter %3d Update %-15s Bound %22f (%15s) (%5.0f)  %s" % (itr, var_name, bound, diff, perp, addendum))
Example #24
0
    def pdf(self, data=None):
        """Probability density function (PDF).

        Parameters
        ----------
        data : array_like
            Grid of point to evaluate PDF at.

            (k,) - one observation, k dimensions

            (T, k) - T observations, k dimensions

        Returns
        -------
        (T, ) array
            PDF values

        """
        ndim = self.lam.size
        if data is None:
            raise ValueError('No data given!')
        self.data = np.atleast_2d(data)
        # (T, k) array
        diff = self.data - self.const_mu()
        # (k, T) array
        diff_norm = scl.solve(self.const_sigma(), diff.T)
        # (T, ) array
        diff_sandwich = (diff.T * diff_norm).sum(0)
        term1 = ((np.pi * self.eta) ** ndim
            * scl.det(self.const_sigma())) **(-.5)
        term2 = np.exp(gammaln((self.eta + self.ndim) / 2)
            - gammaln(self.eta / 2))
        term3 = (1 + diff_sandwich / self.eta) ** (- (self.eta + ndim) / 2)
        return term1 * term2 * term3
Example #25
0
def multivariateNormalPdf(z, x, sigma):
    """ The pdf of a multivariate normal distribution (not in scipy).
    The sample z and the mean x should be 1-dim-arrays, and sigma a square 2-dim-array. """
    assert len(z.shape) == 1 and len(x.shape) == 1 and len(x) == len(z) and sigma.shape == (len(x), len(z))
    tmp = -0.5 * dot(dot((z - x), inv(sigma)), (z - x))
    res = (1. / power(2.0 * pi, len(z) / 2.)) * (1. / sqrt(det(sigma))) * exp(tmp)
    return res
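A quick cross-check against scipy.stats (assumed setup: the names `dot`, `inv`, `det`, `power`, `pi`, `sqrt`, `exp` used by the snippet are in scope as imported below):

import numpy as np
from numpy import dot, power, pi, sqrt, exp
from scipy.linalg import inv, det
from scipy.stats import multivariate_normal

z = np.array([0.3, -0.1])
x = np.zeros(2)
sigma = np.array([[1.0, 0.2], [0.2, 0.5]])
print(np.isclose(multivariateNormalPdf(z, x, sigma),
                 multivariate_normal.pdf(z, mean=x, cov=sigma)))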
Example #26
0
 def in_cirumcircle( a, b, c, test ):
     ax, ay = points[a];     bx, by = points[b]
     cx, cy = points[c];     dx, dy = points[test]
     l1 = [ ax-dx, ay-dy, (ax**2-dx**2) + (ay**2-dy**2) ]
     l2 = [ bx-dx, by-dy, (bx**2-dx**2) + (by**2-dy**2) ]
     l3 = [ cx-dx, cy-dy, (cx**2-dx**2) + (cy**2-dy**2) ]
     return linalg.det( c_[l1, l2, l3] ) > 0        
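A usage sketch (assumed setup: the predicate reads a module-level `points` list, with `linalg` from scipy and `c_` from numpy; the sign convention assumes a, b, c are counterclockwise). The midpoint of the hypotenuse is the circumcentre of a right triangle, so it lies inside:

from numpy import c_
from scipy import linalg

points = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (0.5, 0.5)]
print(in_cirumcircle(0, 1, 2, 3))  # True: (0.5, 0.5) is inside the circumcircle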
Example #27
0
    def test_for_simetric_indefinite_matrix(self):

        # Define test matrix A.
        # Note that the leading 5x5 submatrix is indefinite.
        A = np.asarray([[1, 2, 3, 7, 8],
                        [2, 5, 5, 9, 0],
                        [3, 5, 11, 1, 2],
                        [7, 9, 1, 7, 5],
                        [8, 0, 2, 5, 8]])

        # Get Cholesky from lapack functions
        cholesky, = get_lapack_funcs(('potrf',), (A,))

        # Compute Cholesky Decomposition
        c, k = cholesky(A, lower=False, overwrite_a=False, clean=True)

        delta, v = singular_leading_submatrix(A, c, k)

        A[k-1, k-1] += delta

        # Check if the leading submatrix is singular.
        assert_array_almost_equal(det(A[:k, :k]), 0)

        # Check if `v` fulfils the specified properties
        quadratic_term = np.dot(v, np.dot(A, v))
        assert_array_almost_equal(quadratic_term, 0)
Example #28
0
def eigen(energy, rot, mx, V, R, mu):
    """ determine eigen energy solution based.

    Parameters
    ----------
    energy : float
        energy (eV) of the attempted solution
    rot : int
        rotational quantum number
    mx : int
        matching point index, for inward and outward solutions
    V : numpy 3d array
        potential energy curve and coupling matrix
    R : numpy 1d array
        internuclear distance grid
    mu : float
        reduced mass in kg

    Returns
    -------
    eigenvalue : float
        energy of the solution

    """

    WI = WImat(energy, rot, V, R, mu)
    RI = RImat(WI, mx)
    
    # | R_mx - R^-1_mx+1 |
    return linalg.det(linalg.inv(RI[mx])-RI[mx+1])
Example #29
0
def DBsc(M, object=False):
    ras = result()
    Xk = np.copy(M)
    Yk = np.eye(M.shape[0])
    dA = spl.det(M)**(1.0/2)
    for i in range(1,31):
        uk = np.abs(spl.det(Xk)/dA)**(-1.0/i)
        Xk1 = (Xk*uk + (uk**-1)*np.linalg.inv(Yk))/2
        Yk1 = (Yk*uk + (uk**-1)*np.linalg.inv(Xk))/2
        Xk = Xk1
        Yk = Yk1
        ras.res.append((spl.norm(Xk.dot(Xk)-M)/spl.norm(M)))
    ras.iter = i
    ras.ris = Xk

    return (ras if object else Xk)
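A usage sketch (assumed setup: `np` is numpy, `spl` is scipy.linalg, and `result` is a small container class, a hypothetical stand-in for which is defined below): the scaled Denman-Beavers iteration converges to a square root of an SPD matrix.

import numpy as np
import scipy.linalg as spl

class result:  # hypothetical stand-in for the container class used above
    def __init__(self):
        self.res, self.iter, self.ris = [], 0, None

M = np.array([[4.0, 1.0], [1.0, 3.0]])
X = DBsc(M)
print(np.allclose(X.dot(X), M))  # the iterates converge to a square root of M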
Example #30
0
def KF(y, XF0, VF0, F, H, G, Q, R, limy, ISW, OSW, m, N):
    if OSW == 1:
        XPS = np.zeros((N,m),dtype=np.float); XFS = np.zeros((N,m),dtype=np.float)
        VPS = np.zeros((N,m,m),dtype=np.float); VFS = np.zeros((N,m,m),dtype=np.float)
    XF = XF0; VF = VF0; NSUM = 0.0; SIG2 = 0.0; LDET = 0.0
    for n in xrange(N):
        # one-step-ahead prediction
        XP = np.ndarray.flatten( np.dot(F, XF.T) ) # becomes a column vector from the second pass, so always convert to a row vector
        VP = np.dot( np.dot(F, VF), F.T ) +  np.dot( np.dot(G, Q), G.T)
        # filtering step
        # note: R is a column vector if left untouched; in Python it becomes a row vector!
        if y[n] < limy:
            NSUM = NSUM + 1

            B = np.dot( np.dot(H, VP), H.T)  + R  # H is mathematically a row vector
            B1 = inverse(B) # nvar-dimensional column vector
            K = np.matrix(np.dot(VP, H.T)) * np.matrix(B1) # K becomes a column vector (matrix)
            e = np.array(y[n]).T - np.dot(H, XP.T) # nvar-dimensional column vector
            XF = np.array(XP) + np.array( K * np.matrix(e) ).T # row vector
            VF = np.array(VP) - np.array( K* np.matrix(H) * VP)
            SIG2 = SIG2 + np.ndarray.flatten(np.array( np.matrix(e) * np.matrix(B1) * np.matrix(e).T ))[0] # cast to matrix so the 1-D case also works
            LDET = LDET + math.log(linalg.det(B))
        else:
            XF = XP; VF = VP
        if OSW == 1:
            XPS[n,:] = XP; XFS[n,:] = XF; VPS[n,:,:] = VP; VFS[n,:,:] = VF
    SIG2 = SIG2 / NSUM
    if ISW == 0:
        FF = -0.5 * (NSUM * (math.log(2 * np.pi * SIG2) + 1) + LDET)
    else:
        FF = -0.5 * (NSUM * (math.log(2 * np.pi) + SIG2) + LDET)
    if OSW == 0:
        return {'LLF':FF, 'Ovar':SIG2}
    if OSW == 1:
        return {'XPS':XPS, 'XFS':XFS, 'VPS':VPS, 'VFS':VFS, 'LLF':FF, 'Ovar':SIG2}
Example #31
0
    print(i)

a=np.arange(10)
#[ 0  1  4  9 16 25 36 49 64 81]
print(a**2)

###
###Using scipy
###
from scipy import linalg
a=np.array([[1,2],[30,4]])
print(a)
#determinant of a 2x2 matrix
#note: scipy.linalg is recommended over numpy.linalg
#-56.0
print(linalg.det(a))

###
###Using pandas; data structures: Series and DataFrame
###
import pandas as pd

s=pd.Series([2,4,5,np.nan,8,9])
print(s)

#DatetimeIndex(['2017-12-01', '2017-12-02', '2017-12-03', '2017-12-04',
#               '2017-12-05', '2017-12-06', '2017-12-07'],
#              dtype='datetime64[ns]', freq='D')
dates=pd.date_range('20171201',periods=7)
print(dates)
Example #32
0
from scipy import linalg
import numpy as np

mat = np.array([
    [2, 3, 1],
    [4, 9, 10],
    [10, 5, 6],
])

print(mat)

det = linalg.det(mat)
inv = linalg.inv(mat)
print("determinant of the matrix", det)
print("inverse of matrix\n", inv)

# singular value decomposition
comp1, comp2, comp3 = linalg.svd(mat)
print('perform svd')
print(comp1)
print(comp2)
print(comp3)

# stats module
from scipy import stats

# normal distribution with mean 3 and standard deviation 5
rvs_20 = stats.norm.rvs(3, 5, size=20)
print(rvs_20)

cdf_ = stats.beta.cdf(0.42, a=100, b=50)
Example #33
0
def calcComplexGradient(S, H1, v2, Psi, Wg, Rg):
    Bra = Psi.copy()
    Ket = Psi.copy()

    #denominator and numerator of energy
    D = 0.0
    N = 0.0
    #gradient with respect to a,b orbital parameter of the denominator and numerator of energy
    Dab = np.zeros(Psi.shape, dtype = complex)
    Nab = np.zeros(Psi.shape, dtype = complex)
    #gradient with respect to a,b* orbital parameter of the denominator and numerator of energy
    Dab_bar = np.zeros(Psi.shape, dtype = complex)
    Nab_bar = np.zeros(Psi.shape, dtype = complex)

    for i in range(len(Wg)):
        Ketg = Rg[i].dot(Ket)

        #overlap quantities
        O = Bra.conj().T.dot(S).dot(Ketg)
        invO = lalg.inv(O)
        detO = lalg.det(O)

        #derivative of detO with respect to a,b orbital parameter
        detOab = detO * np.einsum('bi,ji,ja,a->ab', invO, Psi.conj(), S, np.diag(Rg[i]), dtype = complex, optimize = True)
        detOab_bar = detO * np.einsum('ai,i,ij,jb->ab', S, np.diag(Rg[i]), Psi, invO, dtype = complex, optimize = True)

        #density matrix
        dm1 = Ketg.dot(invO).dot(Bra.conj().T)

        #hamiltonian quantities
        G1 = contractEri(v2, dm1)
        F1 = H1 + 0.5 * G1
        H = np.einsum('pq,qp->', F1, dm1, dtype = complex, optimize = True)

        #derivative of H with respect to a,b orbital parameter
        M = H1 + G1

        Aab = np.einsum('bi,pi,pa,a->ab', invO, Psi.conj(), M, np.diag(Rg[i]), dtype = complex, optimize = True)
        Baj = np.einsum('jk,lk,la,a->aj', invO, Psi.conj(), S, np.diag(Rg[i]), dtype = complex, optimize = True)
        Hab = Aab - np.einsum('qb,qj,aj->ab', Aab, Psi, Baj, dtype = complex, optimize = True)

        Aab_bar = np.einsum('aq,q,qi,ib->ab', M, np.diag(Rg[i]), Psi, invO, dtype = complex, optimize = True)
        Bak_bar = np.einsum('ai,i,ij,jk->ak', S, np.diag(Rg[i]), Psi, invO, dtype = complex, optimize = True)
        Hab_bar = Aab_bar - np.einsum('ak,pk,pb->ab', Bak_bar, Psi.conj(), Aab_bar, dtype =  complex, optimize = True)

        #averages with symmetry weights
        D += Wg[i] * detO
        Dab += Wg[i] * detOab
        Dab_bar += Wg[i] * detOab_bar

        N += Wg[i] * detO * H
        Nab += Wg[i] * (detOab * H + detO * Hab)
        Nab_bar += Wg[i] * (detOab_bar * H + detO * Hab_bar)

    #for complex conjugation projection, calculate overlaps with the conjugate of the ket, note that the only nonzero derivatives are with respect to a,b* parameters
    for i in range(len(Wg)):
        Ketg = Rg[i].dot(Ket.conj())

        #overlap quantities
        O = Bra.conj().T.dot(S).dot(Ketg)
        invO = lalg.inv(O)
        detO = lalg.det(O)

        #derivative of detO with respect to a,b orbital parameter
        detOab_bar  = detO * np.einsum('ai,i,ij,jb->ab', S, np.diag(Rg[i]), Psi.conj(), invO, dtype = complex, optimize = True)
        #detOab_bar += detO * np.einsum('bi,ji,ja,a->ab', invO, Psi.conj(), S, np.diag(Rg[i]), dtype = complex, optimize = True)
        detOab_bar = detOab_bar + detOab_bar

        #density matrix
        dm1 = Ketg.dot(invO).dot(Bra.conj().T)

        #hamiltonian quantities
        G1 = contractEri(v2, dm1)
        F1 = H1 + 0.5 * G1
        H = np.einsum('pq,qp->', F1, dm1, dtype = complex, optimize = True)

        #derivative of H with respect to a,b orbital parameter
        M = H1 + G1

        A1ab_bar = np.einsum('bi,pi,pa,a->ab', invO, Psi.conj(), M, np.diag(Rg[i]), dtype = complex, optimize = True)
        #A2ab_bar = np.einsum('aq,q,qi,ib->ab', M, np.diag(Rg[i]), Psi.conj(), invO, dtype = complex, optimize = True)
        A2ab_bar = A1ab_bar

        C1ak_bar = np.einsum('ai,i,ij,jk->ak', S, np.diag(Rg[i]), Psi.conj(), invO, dtype = complex, optimize = True)
        #D1pb_bar = np.einsum('pq,q,ql,lb->pb', M, np.diag(Rg[i]), Psi.conj(), invO, dtype = complex, optimize = True)
        D1pb_bar = A1ab_bar
        B1ab_bar = - np.einsum('ak,pk,pb->ab', C1ak_bar, Psi.conj(), D1pb_bar, optimize = True)

        #C2bq_bar = np.einsum('bi,pi,pq,q->bq', invO, Psi.conj(), M, np.diag(Rg[i]), dtype = complex, optimize = True)
        #D2ja_bar = np.einsum('jk,lk,la,a->ja', invO, Psi.conj(), S, np.diag(Rg[i]), dtype = complex, optimize = True)
        #B2ab_bar = - np.einsum('bq,qj,ja->ab', C2bq_bar, Psi.conj(), D2ja_bar, dtype = complex, optimize = True)
        B2ab_bar = B1ab_bar

        Hab_bar = A1ab_bar + A2ab_bar + B1ab_bar + B2ab_bar

        #averages with symmetry weights
        D += Wg[i] * detO
        Dab_bar += Wg[i] * detOab_bar

        N += Wg[i] * detO * H
        Nab_bar += Wg[i] * (detOab_bar * H + detO * Hab_bar)

    E = N / D
    J = Nab / D - E * (Dab / D)
    J_bar = Nab_bar / D - E * (Dab_bar / D)
    return J, J_bar
Example #34
0
 def test_simple(self):
     a = [[1, 2], [3, 4]]
     a_det = det(a)
     assert_almost_equal(a_det, -2.0)
Example #35
0
#!/usr/bin/python
# coding=utf8

import numpy as np
from scipy import linalg

__author__ = 'Jam'
__date__ = '2019/7/17 13:29'

print("-" * 70)
a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
b = np.array([2, 4, -2])
res1 = linalg.solve(a, b)
print(res1)

print("-" * 70)
A = np.array([[1, 2], [3, 4]])
res2 = linalg.det(A)
print(res2)

print("-" * 70)
A = np.array([[1, 2], [3, 4]])
l, v = linalg.eig(A)
print(l, v)

print("-" * 70)
a = np.random.randn(2, 3) + 1.j * np.random.randn(2, 3)
U, s, Vh = linalg.svd(a)
print(U, Vh, s)
Example #36
0
def zonotope_sampler(A_zono, **params):
    """ MCMC based sampler for projection DPPs.
    The similarity matrix is the orthogonal projection matrix onto
    the row span of the feature vector matrix.
    Samples are of size equal to the rank of the projection matrix,
    also equal to the rank of the feature matrix (assumed to be full row rank).

    :param A_zono:
        Feature vector matrix, feature vectors are stacked columnwise.
        It is assumed to be full row rank.
    :type A_zono:
        array_like

    :param params: Dictionary containing the parameters

        - ``'lin_obj'`` (list): Linear objective (:math:`c`) of the linear program used to identify the tile in which a point lies. Default is a random Gaussian vector.
        - ``'x_0'`` (list): Initial point.
        - ``'nb_iter'`` (int): Number of iterations of the MCMC chain. Default is 10.
        - ``'T_max'`` (float): Maximum running time of the algorithm (in seconds).
        Default is None.
        - ``'random_state'`` (default None)
    :type params: dict

    :return:
        MCMC chain of approximate samples (stacked row_wise i.e. nb_iter rows).
    :rtype:
        array_like

    .. seealso::

        Algorithm 5 in :cite:`GaBaVa17`

        - :func:`extract_basis <extract_basis>`
        - :func:`basis_exchange_sampler <basis_exchange_sampler>`
    """
    # For zonotope sampler
    try:
        from cvxopt import matrix, spmatrix, solvers
    except ImportError:
        raise ValueError(
            'The cvxopt package is required to use the zonotope sampler (see setup.py).'
        )

    solvers.options['show_progress'] = params.get('show_progress', False)
    solvers.options['glpk'] = {
        'msg_lev': params.get('show_progress', 'GLP_MSG_OFF')
    }

    rng = check_random_state(params.get('random_state', None))

    r, N = A_zono.shape  # Sizes of r=samples=rank(A_zono), N=ground set
    # Linear objective
    c = matrix(params.get('lin_obj', rng.randn(N)))
    # Initial point x0 = A*u, u~U[0,1]^n
    x0 = matrix(params.get('x_0', A_zono.dot(rng.rand(N))))

    nb_iter = params.get('nb_iter', 10)
    T_max = params.get('T_max', None)

    ###################
    # Linear problems #
    ###################
    # Canonical form
    # min       c.T*x         min     c.T*x
    # s.t.  G*x <= h    <=>   s.t.    G*x + s = h
    #        A*x = b                      A*x = b
    #                                      s >= 0
    # CVXOPT
    # =====> solvers.lp(c, G, h, A, b, solver='glpk')
    #################################################

    # To access the tile Z(B_x)
    # Solve P_x(A,c)
    ######################################################
    # y^* =
    # argmin  c.T*y               argmin  c.T*y
    # s.t.  A*y = x         <=>   s.t.  A  *y  = x
    #       0 <= y <= 1             [ I_n] *y <= [1^n]
    #                               [-I_n]       [0^n]
    ######################################################
    # Then B_x = \{ i ; y_i^* \in ]0,1[ \}

    A = spmatrix(0.0, [], [], (r, N))
    A[:, :] = A_zono

    G = spmatrix(0.0, [], [], (2 * N, N))
    G[:N, :] = spmatrix(1.0, range(N), range(N))
    G[N:, :] = spmatrix(-1.0, range(N), range(N))

    # Endpoints of segment
    # D_x \cap Z(A) = [x+alpha_m*d, x-alpha_M*d]
    ###########################################################################
    # alpha_m/_M = argmin  +/-alpha      argmin [+/-1 0^N].T * [alpha,lambda]
    # s.t.    x + alpha d = A lambda <=> s.t.  [-d A] *[alpha, lambda] = x
    #         0 <= lambda <= 1             [0^N I_N] *[alpha, lambda] <= [1^N]
    #                                      [0^N -I_N]                    [0^N]
    ##########################################################################

    c_mM = matrix(0.0, (N + 1, 1))
    c_mM[0] = 1.0

    A_mM = spmatrix(0.0, [], [], (r, N + 1))
    A_mM[:, 1:] = A

    G_mM = spmatrix(0.0, [], [], (2 * N, N + 1))
    G_mM[:, 1:] = G

    # Common h to both kind of LP
    # cf. 0 <= y <= 1 and 0 <= lambda <= 1
    h = matrix(0.0, (2 * N, 1))
    h[:N, :] = 1.0

    ##################
    # Initialization #
    ##################
    B_x0 = []
    while len(B_x0) != r:
        # Initial tile B_x0
        # Solve P_x0(A,c)
        y_star = solvers.lp(c, G, h, A, x0, solver='glpk')['x']
        # Get the tile
        B_x0 = extract_basis(np.asarray(y_star))

    # Initialize sequence of sample
    chain = np.zeros((nb_iter, r), dtype=int)
    chain[0] = B_x0

    # Compute the det of the tile (Vol(B)=abs(det(B)))
    det_B_x0 = la.det(A_zono[:, B_x0])

    t_start = time.time() if T_max else 0

    for it in range(1, nb_iter):

        # Take uniform direction d defining D_x0
        d = matrix(rng.randn(r, 1))

        # Define D_x0 \cap Z(A) = [x0 + alpha_m*d, x0 - alpha_M*d]
        # Update the constraint [-d A] * [alpha,lambda] = x
        A_mM[:, 0] = -d
        # Find alpha_m/M
        alpha_m = solvers.lp(c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0]
        alpha_M = solvers.lp(-c_mM, G_mM, h, A_mM, x0, solver='glpk')['x'][0]

        # Propose x1 ~ U_{[x0+alpha_m*d, x0-alpha_M*d]}
        x1 = x0 + (alpha_m + (alpha_M - alpha_m) * rng.rand()) * d
        # Proposed tile B_x1
        # Solve P_x1(A,c)
        y_star = solvers.lp(c, G, h, A, x1, solver='glpk')['x']
        # Get the tile
        B_x1 = extract_basis(np.asarray(y_star))

        # Accept/Reject the move with proba Vol(B1)/Vol(B0)
        if len(B_x1) != r:  # if extract_basis returned something ill-conditioned
            chain[it] = B_x0
        else:
            det_B_x1 = la.det(A_zono[:, B_x1])
            if rng.rand() < abs(det_B_x1 / det_B_x0):
                x0, B_x0, det_B_x0 = x1, B_x1, det_B_x1
                chain[it] = B_x1
            else:
                chain[it] = B_x0

        if T_max:
            if time.time() - t_start > T_max:
                break

    return chain.tolist()
Example #37
0
def func5(n):
    n = int(1.25*n)
    A = np.random.rand(n, n)
    la.det(A)
Example #38
0
    B[j, j + 1] = -1
    B[j, j - 1] = -1

B[0, :] = [2, -1, 0, 0]
B[3, :] = [0, 0, -1, 2]

print 'A = '
print A
print ' '

print 'B = '
print B
print ' '

print '|A| = '
print la.det(A)
print ' '

print 'AB = '
print dot(A, B)
print ' '

print '*Element-wise* A*B = '
print A * B
print ' '

print 'A^T = '
print A.T
print ' '

print 'A^{-1} = '
Example #39
0
def nedelecBasisIterative(eleNodes, points, eleVol, lengthEdges, edgeOrder):
    ''' Compute the basis Nedelec functions in an iterative way for a
    set of points in a given element.

    :param ndarray eleNodes: nodal spatial coordinates of the element
    :param ndarray points: spatial coordinates of the evaluation points
    :param float eleVol: element's volume
    :param ndarray lengthEdges: element's edges defined by their length
    :param int edgeOrder: order of tetrahedral edge element
    :return: values of Nedelec functions.
    :rtype: ndarray.

    .. note: References:\n
       Jin, Jian-Ming. The finite element method in electromagnetics.
       John Wiley & Sons, 2002.
    '''
    # Coefficients computation. Running in a cycling way
    a = np.zeros([4], dtype=np.float64)
    b = np.zeros([4], dtype=np.float64)
    c = np.zeros([4], dtype=np.float64)
    d = np.zeros([4], dtype=np.float64)

    tmp = np.array([0, 1, 2, 3, 0, 1, 2], dtype=np.int)
    temp_ones = np.ones([3], dtype=np.float64)

    for iCoeff in np.arange(4):
        a[iCoeff] = det([[
            eleNodes[tmp[iCoeff + 1], 0], eleNodes[tmp[iCoeff + 2], 0],
            eleNodes[tmp[iCoeff + 3], 0]
        ],
                         [
                             eleNodes[tmp[iCoeff + 1],
                                      1], eleNodes[tmp[iCoeff + 2], 1],
                             eleNodes[tmp[iCoeff + 3], 1]
                         ],
                         [
                             eleNodes[tmp[iCoeff + 1],
                                      2], eleNodes[tmp[iCoeff + 2], 2],
                             eleNodes[tmp[iCoeff + 3], 2]
                         ]])
        b[iCoeff] = det([
            temp_ones,
            [
                eleNodes[tmp[iCoeff + 1], 1], eleNodes[tmp[iCoeff + 2], 1],
                eleNodes[tmp[iCoeff + 3], 1]
            ],
            [
                eleNodes[tmp[iCoeff + 1], 2], eleNodes[tmp[iCoeff + 2], 2],
                eleNodes[tmp[iCoeff + 3], 2]
            ]
        ])
        c[iCoeff] = det([
            temp_ones,
            [
                eleNodes[tmp[iCoeff + 1], 0], eleNodes[tmp[iCoeff + 2], 0],
                eleNodes[tmp[iCoeff + 3], 0]
            ],
            [
                eleNodes[tmp[iCoeff + 1], 2], eleNodes[tmp[iCoeff + 2], 2],
                eleNodes[tmp[iCoeff + 3], 2]
            ]
        ])
        d[iCoeff] = det([
            temp_ones,
            [
                eleNodes[tmp[iCoeff + 1], 0], eleNodes[tmp[iCoeff + 2], 0],
                eleNodes[tmp[iCoeff + 3], 0]
            ],
            [
                eleNodes[tmp[iCoeff + 1], 1], eleNodes[tmp[iCoeff + 2], 1],
                eleNodes[tmp[iCoeff + 3], 1]
            ]
        ])

    # Add signs
    sign = np.float64(-1.0)
    a[1] = a[1] * sign
    a[3] = a[3] * sign
    b[0] = b[0] * sign
    b[2] = b[2] * sign
    c[1] = c[1] * sign
    c[3] = c[3] * sign
    d[0] = d[0] * sign
    d[2] = d[2] * sign

    # Number of points
    if points.ndim == 1:
        nPoints = 1
    else:
        nPoints = points.shape[0]

    # Nedelec basis for all points
    if nPoints == 1:
        AA = np.float64(1.0) / ((np.float64(6.0) * eleVol)**2)
        # To reduce number of multiplications
        b1x = b[0] * points[0]
        b2x = b[1] * points[0]
        b3x = b[2] * points[0]
        b4x = b[3] * points[0]
        c1y = c[0] * points[1]
        c2y = c[1] * points[1]
        c3y = c[2] * points[1]
        c4y = c[3] * points[1]
        d1z = d[0] * points[2]
        d2z = d[1] * points[2]
        d3z = d[2] * points[2]
        d4z = d[3] * points[2]
        A1 = a[0] + b1x + c1y + d1z
        A2 = a[1] + b2x + c2y + d2z
        A3 = a[2] + b3x + c3y + d3z
        A4 = a[3] + b4x + c4y + d4z
        # Basis 1
        b1 = np.multiply([(b[1] * A1) - (b[0] * A2), (c[1] * A1) - (c[0] * A2),
                          (d[1] * A1) - (d[0] * A2)], lengthEdges[0])
        # Basis 2
        b2 = np.multiply([
            b[2] * A1 - b[0] * A3, c[2] * A1 - c[0] * A3, d[2] * A1 - d[0] * A3
        ], lengthEdges[1])
        # Basis 3
        b3 = np.multiply([
            b[3] * A1 - b[0] * A4, c[3] * A1 - c[0] * A4, d[3] * A1 - d[0] * A4
        ], lengthEdges[2])
        # Basis 4
        b4 = np.multiply([
            b[2] * A2 - b[1] * A3, c[2] * A2 - c[1] * A3, d[2] * A2 - d[1] * A3
        ], lengthEdges[3])
        # Basis 5
        b5 = np.multiply([
            b[1] * A4 - b[3] * A2, c[1] * A4 - c[3] * A2, d[1] * A4 - d[3] * A2
        ], lengthEdges[4])
        # Basis 6
        b6 = np.multiply([
            b[3] * A3 - b[2] * A4, c[3] * A3 - c[2] * A4, d[3] * A3 - d[2] * A4
        ], lengthEdges[5])

        basis = np.array(np.vstack((b1, b2, b3, b4, b5, b6)) * AA,
                         dtype=np.float64)
    # If not
    else:
        basis = np.zeros((edgeOrder, 3, nPoints), dtype=np.float64)
        AA = np.float64(1.0) / ((np.float64(6.0) * eleVol)**2)
        # Compute basis for each point
        for iP in np.arange(nPoints):
            # To reduce number of multiplications
            b1x = b[0] * points[iP, 0]
            b2x = b[1] * points[iP, 0]
            b3x = b[2] * points[iP, 0]
            b4x = b[3] * points[iP, 0]
            c1y = c[0] * points[iP, 1]
            c2y = c[1] * points[iP, 1]
            c3y = c[2] * points[iP, 1]
            c4y = c[3] * points[iP, 1]
            d1z = d[0] * points[iP, 2]
            d2z = d[1] * points[iP, 2]
            d3z = d[2] * points[iP, 2]
            d4z = d[3] * points[iP, 2]
            A1 = a[0] + b1x + c1y + d1z
            A2 = a[1] + b2x + c2y + d2z
            A3 = a[2] + b3x + c3y + d3z
            A4 = a[3] + b4x + c4y + d4z
            # Basis 1
            b1 = np.multiply([(b[1] * A1) - (b[0] * A2),
                              (c[1] * A1) - (c[0] * A2),
                              (d[1] * A1) - (d[0] * A2)], lengthEdges[0])
            # Basis 2
            b2 = np.multiply([
                b[2] * A1 - b[0] * A3, c[2] * A1 - c[0] * A3,
                d[2] * A1 - d[0] * A3
            ], lengthEdges[1])
            # Basis 3
            b3 = np.multiply([
                b[3] * A1 - b[0] * A4, c[3] * A1 - c[0] * A4,
                d[3] * A1 - d[0] * A4
            ], lengthEdges[2])
            # Basis 4
            b4 = np.multiply([
                b[2] * A2 - b[1] * A3, c[2] * A2 - c[1] * A3,
                d[2] * A2 - d[1] * A3
            ], lengthEdges[3])
            # Basis 5
            b5 = np.multiply([
                b[1] * A4 - b[3] * A2, c[1] * A4 - c[3] * A2,
                d[1] * A4 - d[3] * A2
            ], lengthEdges[4])
            # Basis 6
            b6 = np.multiply([
                b[3] * A3 - b[2] * A4, c[3] * A3 - c[2] * A4,
                d[3] * A3 - d[2] * A4
            ], lengthEdges[5])

            basis[:, :, iP] = np.vstack((b1, b2, b3, b4, b5, b6)) * AA

    return basis
Example #40
0
 def mlogdet(self):
     return np.log(linalg.det(self.m))
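A side note (stability sketch, not from the original class): np.log(det(m)) overflows for large well-conditioned matrices even when the log-determinant itself is representable; numpy.linalg.slogdet computes it directly.

import numpy as np

m = np.eye(700) * 3.0
sign, logdet = np.linalg.slogdet(m)  # det(m) = 3**700 overflows float64
print(sign, logdet)                  # 1.0, 700*log(3) =~ 769.1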
Example #41
0
 def mdet(self):
     return linalg.det(self.m)
Example #42
0
    def do_nsd(self, row, col):
        r"""Evaluates by numerical steepest descent the integral
        :math:`\langle \Phi_i | f | \Phi^\prime_j \rangle` for a polynomial
        function :math:`f(x)` with :math:`x \in \mathbb{R}^D`.

        :param row: The index :math:`i` of the component :math:`\Phi_i` of :math:`\Psi`.
        :param col: The index :math:`j` of the component :math:`\Phi^\prime_j` of :math:`\Psi^\prime`.
        :return: A complex valued matrix of shape :math:`|\mathfrak{K}_i| \times |\mathfrak{K}^\prime_j|`.
        """
        D = self._packet.get_dimension()
        N = self._packet.get_number_components()
        eps = self._packet.get_eps()
        Pibra = self._pacbra.get_parameters(component=row)
        Piket = self._packet.get_parameters(component=col)
        Pimix = self.mix_parameters(Pibra[:4], Piket[:4])

        # Combine oscillators
        A, b, c = self.build_bilinear(Pibra[:4], Piket[:4])

        # Schur decomposition of A = U^H T U
        T, U = schur(A, output="complex")
        U = conjugate(transpose(U))

        # Oscillator updates
        for i in range(1, D):
            if T[i - 1, i - 1] == 0:
                # TODO: Prove that this never happens or handle it correctly!
                print("Warning: 'update_oscillator' encountered a RESIDUE situation!")

            # Diagonal Elements
            for j in range(i, D):
                T[j, j] = T[j, j] - T[i - 1, j]**2 / (4.0 * T[i - 1, i - 1])

            # Others
            for rowi in range(i, D):
                for coli in range(rowi + 1, D):
                    T[rowi, coli] = T[rowi, coli] - T[i - 1, rowi] * T[i - 1, coli] / (2 * T[i - 1, i - 1])

        # Compute remaining parts
        X = inv(A + transpose(A))
        ctilde = c - 0.5 * dot(transpose(b), dot(X, b))

        # Prefactor originating from constant term c
        eps = self._packet.get_eps()
        w = 1.0 / eps**2
        prefactor = exp(1.0j * w * ctilde)

        # Take out diagonals of T
        Dk = diag(T).reshape((D, 1))
        # Tau (path parametrization variable)
        tk = self._nodes / sqrt(w)

        # Path Precomposition
        Tu = 0.5 * triu(T, 1) / Dk
        paths = (sqrt(1.0j / Dk) * tk).astype(complexfloating)
        for i in reversed(range(D)):
            paths[i, :] = paths[i, :] - dot(Tu[i, :], paths)

        # Path derivatives
        pathderivs = sqrt(1.0j / Dk)
        pdp = product(pathderivs, axis=0)

        # Backtransformation of paths
        pathst = dot(conjugate(transpose(U)), paths) - dot(X, b)

        # Another normalization prefactor
        # This is what differs the constant part of phi_0 from 1.
        # We lose it when dividing by phi_0, hence manually add it again.
        # TODO: Do we need mixing parameters here?
        #       Preliminary answer: no
        fr = (pi * eps**2)**(-0.25 * D) * 1.0 / sqrt(det(Pibra[2]))
        fc = (pi * eps**2)**(-0.25 * D) * 1.0 / sqrt(det(Piket[2]))
        normfactor = conjugate(fr) * fc

        # Compute global phase difference
        phase = exp(1.0j / eps**2 * (Piket[4] - conjugate(Pibra[4])))

        # Non-oscillatory parts
        # Wavepacket
        # TODO: This is a huge hack: division by phi_0 not stable?
        basisr = self._pacbra.evaluate_basis_at(conjugate(pathst), row, prefactor=False)
        basisr = basisr / basisr[0, :]
        basisc = self._packet.evaluate_basis_at(pathst, col, prefactor=False)
        basisc = basisc / basisc[0, :]
        # Basis division by phi0 may introduce NaNs
        #basisr = nan_to_num(basisr)
        #basisc = nan_to_num(basisc)

        # Operator should support the component notation for efficiency
        if self._eval_at_once is True:
            # TODO: Sure, this is inefficient, but we can not do better right now.
            opath = self._operator(pathst, Pimix[0])[row * N + col]
        else:
            opath = self._operator(pathst, Pimix[0], entry=(row, col))

        # Do the quadrature
        quadrand = (opath * pdp * self._weights).reshape((-1,))
        # Sum up matrices over all quadrature nodes
        M = einsum("k,ik,jk", quadrand, conjugate(basisr), basisc)

        return phase * normfactor * prefactor * M / sqrt(w)**D
Example #43
0
def lndetC(sij, x, hessian=False):
    '''
    f = ln det C = ln det F^{-1} = -ln det F
    where F = \sum_m^M x_m v_m.v_m^t
    
    By Jacobi's formula
    
    df/dx_m = -tr(F^{-1}.(v_m.v_m^t))

    The second derivative is 
    
    d^2 f/dx_a dx_b = -tr( dC/dx_b.v_a.v_a^t)
                    = tr( C.dF/dx_b.C.v_a.v_a^t)
                    = tr( C.v_b.v_b^t.C.v_a.v_a^t)
    
    Return:
    tuple (f, d/dx f) if hessian is false
    tuple (f, d/dx f, d^2/dx^2 f) if hessian is true.
    '''
    if not isinstance(sij, matrix): sij = matrix(sij)
    K = sij.size[0]
    M = K * (K + 1) / 2
    F = matrix(0., (K, K))
    for i in xrange(K):
        # n_{ii}*v_{ii}.v_{ii}^t
        F[i, i] += x[i] / (sij[i, i] * sij[i, i])
        for j in xrange(i + 1, K):
            m = measurement_index(i, j, K)
            v2 = x[m] / (sij[i, j] * sij[i, j])
            F[i, i] += v2
            F[j, j] += v2
            F[i, j] = F[j, i] = -v2
    C = linalg.inv(F)
    fval = -np.log(linalg.det(F))
    df = matrix(0., (1, M))
    for i in xrange(K):
        df[i] = -C[i, i] / (sij[i, i] * sij[i, i])
        for j in xrange(i + 1, K):
            m = measurement_index(i, j, K)
            df[m] = (2 * C[i, j] - C[i, i] - C[j, j]) / (sij[i, j] * sij[i, j])
    if not hessian:
        return (fval, df)
    # Compute the Hessian
    d2f = matrix(0., (M, M))
    for i in xrange(K):
        for j in xrange(i, K):
            # d^2/dx_i dx_j = C_{ij}^2/(s_{ii}^2 s_{jj}^2)
            d2f[i, j] = C[i, j] * C[i, j] / (sij[i, i] * sij[i, i] *
                                             sij[j, j] * sij[j, j])
            d2f[j, i] = d2f[i, j]
        for i2 in xrange(K):
            for j2 in xrange(i2 + 1, K):
                m2 = measurement_index(i2, j2, K)
                # d^2/dx_id_x(i',j') = (C_{ii'}-C_{ji'})^2/(s_{i'i'}^2 s_{ij}^2)
                dC = C[i2, i] - C[j2, i]
                d2f[i, m2] = dC * dC / (sij[i, i] * sij[i, i] * sij[i2, j2] *
                                        sij[i2, j2])
                d2f[m2, i] = d2f[i, m2]
        for j in xrange(i + 1, K):
            m = measurement_index(i, j, K)
            invs2 = 1 / (sij[i, j] * sij[i, j])
            for i2 in xrange(i, K):
                for j2 in xrange(i2 + 1, K):
                    m2 = measurement_index(i2, j2, K)
                    # d^2/dx_{ij}dx_{i'j'} =
                    # (C_{ii'}+C_{jj'}-C_{ji'}-C_{ij'})^2/(s_{i'j'}^2 s_{ij}^2)
                    dC = C[i, i2] + C[j, j2] - C[j, i2] - C[i, j2]
                    d2f[m, m2] = dC * dC * invs2 / (sij[i2, j2] * sij[i2, j2])
                    d2f[m2, m] = d2f[m, m2]
    return (fval, df, d2f)
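
A quick numeric sanity check of Jacobi's formula used above, df/dx_m = -tr(F^{-1} v_m v_m^t) = -v_m^t F^{-1} v_m for f = -ln det F; the vectors and weights below are made up for illustration:

import numpy as np

rng = np.random.default_rng(0)
K, M = 3, 5
V = rng.normal(size=(M, K))                # hypothetical measurement vectors v_m
x0 = rng.uniform(0.5, 1.5, size=M)         # positive weights x_m

def f(x):
    F = sum(x[m] * np.outer(V[m], V[m]) for m in range(M))
    return -np.log(np.linalg.det(F))

F = sum(x0[m] * np.outer(V[m], V[m]) for m in range(M))
Finv = np.linalg.inv(F)
grad = np.array([-V[m].dot(Finv).dot(V[m]) for m in range(M)])   # analytic

h = 1e-6                                   # central finite differences
fd = np.array([(f(x0 + h * np.eye(M)[m]) - f(x0 - h * np.eye(M)[m])) / (2 * h)
               for m in range(M)])
assert np.allclose(grad, fd, atol=1e-5)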
Example #44
0
import numpy as np
from scipy import linalg
A = np.array([[1, 8, -9, 7, 5], [0, 1, 0, 4, 4], [0, 0, 1, 2, 5],
              [0, 0, 0, 1, -5], [0, 0, 0, 0, 1]])
det_A = linalg.det(A)
print("Determinant of the matrix\n")
print(det_A)
inv_A = linalg.pinv(A)
print("\nInverse of the matrix\n")
print(inv_A)
print("\nTranspose of the matrix\n")
transpose = np.matrix.transpose(A)
print(transpose)
eig_A = linalg.eig(A)
print("\nEigen pairs of the matrix\n")
print(eig_A)
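
Since this A is upper triangular with a unit diagonal, its determinant is the product of the diagonal entries, i.e. exactly 1; a one-line check reusing the names above:

assert np.isclose(det_A, np.prod(np.diag(A)))  # det of a triangular matrix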
Example #45
0
def align_wahba(dt, theta, dv, lat, VE=None, VN=None):
    """Estimate attitude matrix by solving Wahba's problem.

    This method is based on solving a least-squares problem for a direction
    cosine matrix A (originally formulated in [1]_)::

        L = sum(||A r_i - b_i||^2, i=1, ..., m) -> min A,
        s. t. A being a right orthogonal matrix.

    Here ``(r_i, b_i)`` are measurements of the same unit vectors in two
    frames.

    The application of this method to self alignment of INS is explained in
    [2]_. In this problem the vectors ``(r_i, b_i)`` are normalized velocity
    increments due to gravity. It is applicable to dynamic conditions as well,
    but in this case a full accuracy can be achieved only if velocity is
    provided.

    The optimization problem is solved using the most straightforward method
    based on SVD [3]_.

    Parameters
    ----------
    dt : double
        Sensors sampling period.
    theta, dv : array_like, shape (n_samples, 3)
        Rotation vectors and velocity increments computed from gyro and
        accelerometer readings after applying coning and sculling
        corrections.
    lat : float
        Latitude of the place.
    VE, VN : array_like with shape (n_samples + 1,) or None
        East and North velocity of the target. If None (default), it is
        assumed to be 0. See Notes for further details.

    Returns
    -------
    hpr : tuple of 3 floats
        Estimated heading, pitch and roll at the end of the alignment.
    P_align : ndarray, shape (3, 3)
        Covariance matrix of misalignment angles, commonly known as
        "phi-angle" in INS literature. Its values are measured in degrees
        squared. This matrix is estimated in a rather ad-hoc fashion, see
        Notes.

    Notes
    -----
    If the alignment takes place in dynamic conditions but velocities `VE`
    and `VN` are not provided, the alignment accuracy will be decreased (to
    some extent it will be reflected in `P_align`). Note that `VE` and `VN` are
    required with the same rate as inertial readings (and contain 1 more
    sample). It means that you usually have to do some sort of interpolation.
    In on-board implementation you just provide the last available velocity
    data from GPS and it will work fine.

    The paper [3]_ contains a recipe for computing the covariance matrix given
    that measurement errors are independent, small and follow a statistical
    distribution with zero mean and known variance. In our case we estimate the
    measurement error variance from the optimal value of the minimized function
    (see above). But as our errors are neither independent nor necessarily small
    (nor do they follow any reasonable distribution), we don't scale their
    variance by the number of observations (as is commonly done for the
    variance of an average value). Some experiments show that this approach
    gives reasonable values of `P_align`.

    Also note that `P_align` accounts only for misalignment errors due
    to non-perfect alignment conditions. In addition to that, azimuth accuracy
    is always limited by gyro drifts and level accuracy is limited by the
    accelerometer biases. You should add these systematic uncertainties to the
    diagonal of `P_align`.

    References
    ----------
    .. [1] G. Wahba, "Problem 65–1: A Least Squares Estimate of Spacecraft
           Attitude", SIAM Review, 1965, 7(3), 409.
    .. [2] P. M. G. Silson, "Coarse Alignment of a Ship’s Strapdown Inertial
          Attitude Reference System Using Velocity Loci", IEEE Trans. Instrum.
          Meas., vol. 60, pp. 1930-1941, Jun. 2011.
    .. [3] F. L. Markley, "Attitude Determination using Vector Observations
           and the Singular Value Decomposition", The Journal of the
           Astronautical Sciences, Vol. 36, No. 3, pp. 245-258, Jul.-Sept.
           1988.
    """
    n_samples = theta.shape[0]
    Vg = np.zeros((n_samples + 1, 3))
    if VE is not None:
        Vg[:, 0] = VE
    if VN is not None:
        Vg[:, 1] = VN

    lat = np.deg2rad(lat)

    slat, clat = np.sin(lat), np.cos(lat)
    tlat = slat / clat
    re, rn = earth.principal_radii(lat)
    u = earth.RATE * np.array([0, clat, slat])
    g = np.array([0, 0, -earth.gravity(slat)])

    Cb0b = np.empty((n_samples + 1, 3, 3))
    Cg0g = np.empty((n_samples + 1, 3, 3))
    Cb0b[0] = np.identity(3)
    Cg0g[0] = np.identity(3)

    Vg_m = 0.5 * (Vg[1:] + Vg[:-1])

    rho = np.empty_like(Vg_m)
    rho[:, 0] = -Vg_m[:, 1] / rn
    rho[:, 1] = Vg_m[:, 0] / re
    rho[:, 2] = Vg_m[:, 0] / re * tlat

    for i in range(n_samples):
        Cg0g[i + 1] = Cg0g[i].dot(dcm.from_rv((rho[i] + u) * dt))
        Cb0b[i + 1] = Cb0b[i].dot(dcm.from_rv(theta[i]))

    f_g = np.cross(u, Vg) - g
    f_g0 = util.mv_prod(Cg0g, f_g)
    f_g0 = 0.5 * (f_g0[1:] + f_g0[:-1])
    f_g0 = np.vstack((np.zeros(3), f_g0))
    V_g0 = util.mv_prod(Cg0g, Vg) + dt * np.cumsum(f_g0, axis=0)

    V_b0 = np.cumsum(util.mv_prod(Cb0b[:-1], dv), axis=0)
    V_b0 = np.vstack((np.zeros(3), V_b0))

    k = n_samples // 2
    b = V_g0[k:2 * k] - V_g0[:k]
    b /= np.linalg.norm(b, axis=1)[:, None]

    r = V_b0[k:2 * k] - V_b0[:k]
    r /= np.linalg.norm(r, axis=1)[:, None]

    B = np.zeros((3, 3))
    for bi, ri in zip(b, r):
        B += np.outer(bi, ri)
    n_obs = b.shape[0]
    B /= n_obs

    U, s, VT = svd(B, overwrite_a=True)
    d = det(U) * det(VT)
    Cg0b0 = U.dot(np.diag([1, 1, d])).dot(VT)

    Cgb = Cg0g[-1].T.dot(Cg0b0).dot(Cb0b[-1])

    s[-1] *= d
    trace_s = np.sum(s)
    L = 1 - trace_s
    D = trace_s - s
    M = np.identity(3) - np.diag(s)
    if L < 0 or np.any(M < 0):
        L = max(L, 0)
        M[M < 0] = 0
        warn("Negative values encountered when estimating the covariance, "
             "they were set to zeros.")

    R = (L * M / n_obs)**0.5 / D
    R = U.dot(R)
    R = Cg0g[-1].T.dot(R)
    R = np.rad2deg(R)

    return dcm.to_hpr(Cgb), R.dot(R.T)
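
The core of the SVD solution to Wahba's problem used above fits in a few lines. A minimal standalone sketch (the function name and the synthetic test are mine):

import numpy as np
from scipy.linalg import svd, det

def wahba_svd(r, b):
    # Rotation C minimizing sum ||C r_i - b_i||^2, via the SVD as in [3]_.
    B = sum(np.outer(bi, ri) for bi, ri in zip(b, r))
    U, _, VT = svd(B)
    d = det(U) * det(VT)                   # force det(C) = +1 (proper rotation)
    return U.dot(np.diag([1.0, 1.0, d])).dot(VT)

# Synthetic check: recover a known rotation from noise-free vector pairs.
rng = np.random.default_rng(1)
C_true, _ = np.linalg.qr(rng.normal(size=(3, 3)))
if np.linalg.det(C_true) < 0:
    C_true[:, 0] *= -1                     # flip one column so det = +1
r = rng.normal(size=(10, 3))
r /= np.linalg.norm(r, axis=1)[:, None]
b = r.dot(C_true.T)
assert np.allclose(wahba_svd(r, b), C_true)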
Example #46
0
    def evaluate_basis_at(self, grid, component, prefactor=False):
        r"""Evaluate the basis functions :math:`\phi_k` recursively at the given nodes :math:`\gamma`.

        :param grid: The grid :math:`\Gamma` containing the nodes :math:`\gamma`.
        :type grid: A class having a :py:meth:`get_nodes(...)` method.
        :param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate.
        :param prefactor: Whether to include a factor of :math:`\frac{1}{\sqrt{\det(Q)}}`.
        :type prefactor: bool, default is ``False``.
        :return: A two-dimensional ndarray :math:`H` of shape :math:`(|\mathcal{K}_i|, |\Gamma|)` where
                 the entry :math:`H[\mu(k), i]` is the value of :math:`\phi_k(\gamma_i)`.
        """
        D = self._dimension

        bas = self._basis_shapes[component]
        bs = self._basis_sizes[component]

        # TODO: Consider putting this into the Grid class as 2nd level API
        # Allow ndarrays for the 'grid' argument
        if isinstance(grid, Grid):
            # The overall number of nodes
            nn = grid.get_number_nodes(overall=True)
            # The grid nodes
            nodes = grid.get_nodes()
        else:
            # The overall number of nodes
            nn = prod(grid.shape[1:])
            # The grid nodes
            nodes = grid

        # Allocate the storage array
        phi = zeros((bs, nn), dtype=complexfloating)

        # Precompute some constants
        q, p, Q, P, S = self._Pis[component]

        Qinv = inv(Q)
        Qbar = conj(Q)
        QQ = dot(Qinv, Qbar)

        # Compute the ground state phi_0 via direct evaluation
        mu0 = bas[tuple(D * [0])]
        phi[mu0, :] = self._evaluate_phi0(self._Pis[component],
                                          nodes,
                                          prefactor=False)

        # Compute all higher order states phi_k via recursion
        for d in xrange(D):
            # Iterator for all valid index vectors k
            indices = bas.get_node_iterator(mode="chain", direction=d)

            for k in indices:
                # Current index vector
                ki = vstack(k)

                # Access predecessors
                phim = zeros((D, nn), dtype=complexfloating)

                for j, kpj in bas.get_neighbours(k, selection="backward"):
                    mukpj = bas[kpj]
                    phim[j, :] = phi[mukpj, :]

                # Compute 3-term recursion
                p1 = (nodes - q) * phi[bas[k], :]
                p2 = sqrt(ki) * phim

                t1 = sqrt(2.0 / self._eps**2) * dot(Qinv[d, :], p1)
                t2 = dot(QQ[d, :], p2)

                # Find multi-index where to store the result
                kped = bas.get_neighbours(k, selection="forward", direction=d)

                # Did we find this k?
                if len(kped) > 0:
                    kped = kped[0]

                    # Store computed value
                    phi[bas[kped[1]], :] = (t1 - t2) / sqrt(ki[d] + 1.0)

        if prefactor is True:
            phi = phi / self._sqrt[component](det(Q))

        return phi
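
In one dimension the recursion above reduces to phi_{k+1} = (sqrt(2/eps^2) * (x - q)/Q * phi_k - (conj(Q)/Q) * sqrt(k) * phi_{k-1}) / sqrt(k + 1). A minimal sketch of that scalar case, taking the ground-state values as given (the function name and signature are mine):

import numpy as np

def evaluate_basis_1d(nodes, phi0, q, Q, eps, K):
    # Three-term recursion for the first K basis functions at the given nodes.
    phi = np.zeros((K, nodes.size), dtype=complex)
    phi[0] = phi0                          # precomputed ground state phi_0
    Qinv, QQ = 1.0 / Q, np.conj(Q) / Q
    for k in range(K - 1):
        t1 = np.sqrt(2.0 / eps**2) * Qinv * (nodes - q) * phi[k]
        t2 = QQ * np.sqrt(k) * phi[k - 1] if k > 0 else 0.0
        phi[k + 1] = (t1 - t2) / np.sqrt(k + 1.0)
    return phi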
Example #47
0
def LinApp_Solve(AA, BB, CC, DD, FF, GG, HH, JJ, KK, LL, MM, WWW, TT, NN, Z0,
                 Sylv):
    """
    This code takes Uhlig's original code and puts it in the form of a
    function.  This version outputs the policy function coefficients: PP,
    QQ and UU for X, and RR, SS and VV for Y.

    Inputs overview:
    The matrices of derivatives: AA - TT.
    The autoregression coefficient matrix NN from the law of motion for Z.
    Z0 is the Z-point about which the linearization is taken.  For
    linearizing about the steady state this is Zbar and normally Zbar = 0.
    Sylv is an indicator variable telling the program to use the built-in
    function sylvester() to solve for QQ and SS, if possible.  Default is
    to use Sylv=1.

    Parameters
    ----------
    AA : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`A`. It is the matrix of
        derivatives of the Y equations with respect to :math:`X_t`
    BB : array_like, dtype=float, shape=(ny, nx)
        The matrix represented above by :math:`B`. It is the matrix of
        derivatives of the Y equations with respect to
        :math:`X_{t-1}`.
    CC : array_like, dtype=float, shape=(ny, ny)
        The matrix represented above by :math:`C`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Y_t`
    DD : array_like, dtype=float, shape=(ny, nz)
        The matrix represented above by :math:`D`. It is the matrix of
        derivatives of the Y equations with respect to :math:`Z_t`
    FF : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`F`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`X_{t+1}`
    GG : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`G`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`X_t`
    HH : array_like, dtype=float, shape=(nx, nx)
        The matrix represented above by :math:`H`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`X_{t-1}`
    JJ : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`J`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Y_{t+1}`
    KK : array_like, dtype=float, shape=(nx, ny)
        The matrix represented above by :math:`K`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Y_t`
    LL : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`L`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Z_{t+1}`
    MM : array_like, dtype=float, shape=(nx, nz)
        The matrix represented above by :math:`M`. It is the matrix of
        derivatives of the model's characterizing equations with
        respect to :math:`Z_t`
    WWW : array, dtype=float, shape=(ny,)
        The vector of the numerical errors of the first ny characterizing
        equations
    TT : array, dtype=float, shape=(nx,)
        The vector of the numerical errors of the next nx characterizing
        equations following the first ny equations
    NN : array_like, dtype=float, shape=(nz, nz)
        The autocorrelation matrix for the exogenous state vector z.
    Z0 : array, dtype=float, shape=(nz,)
        The Z-point about which the linearization is taken.  For linearizing
        about the steady state this is Zbar and normally Zbar = 0.
    Sylv : binary, dtype=int
        An indicator variable telling the program to use the built-in
        function sylvester() to solve for QQ and SS, if possible.  Default is
        to use Sylv=1.

    Returns
    -------
    P : 2D-array, dtype=float, shape=(nx, nx)
        The matrix :math:`P` in the law of motion for endogenous state
        variables described above.
    Q : 2D-array, dtype=float, shape=(nx, nz)
        The matrix :math:`Q` in the law of motion for exogenous state
        variables described above.
    U : array, dtype=float, shape=(nx,)
        ??????????
    R : 2D-array, dtype=float, shape=(ny, nx)
        The matrix :math:`R` in the law of motion for endogenous state
        variables described above.
    S : 2D-array, dtype=float, shape=(ny, nz)
        The matrix :math:`S` in the law of motion for exogenous state
        variables described above.
    V : array, dtype=float, shape=(ny,)
        ???????????
    References
    ----------
    .. [1] Uhlig, H. (1999): "A toolkit for analyzing nonlinear dynamic
       stochastic models easily," in Computational Methods for the Study
       of Dynamic Economies, ed. by R. Marimon, pp. 30-61. Oxford
       University Press.

    """
    # Our original code used the np.matrix form for matrices, so we
    # convert the inputs to numpy matrices here.
    AA = np.matrix(AA)
    BB = np.matrix(BB)
    CC = np.matrix(CC)
    DD = np.matrix(DD)
    FF = np.matrix(FF)
    GG = np.matrix(GG)
    HH = np.matrix(HH)
    JJ = np.matrix(JJ)
    KK = np.matrix(KK)
    LL = np.matrix(LL)
    MM = np.matrix(MM)
    NN = np.matrix(NN)
    WWW = np.array(WWW)
    TT = np.array(TT)
    Z0 = np.array(Z0)
    #Tolerance level to use
    TOL = .000001

    # Here we use matrices to get pertinent dimensions.
    nx = FF.shape[1]
    l_equ = CC.shape[0]
    ny = CC.shape[1]
    nz = min(NN.shape)

    # The following if and else blocks form the
    # Psi, Gamma, Theta, Xi, Delta matrices.
    if l_equ == 0:
        if CC.any():
            # This block makes sure you don't throw an error with an empty CC.
            CC_plus = la.pinv(CC)
            CC_0 = _nullSpaceBasis(CC.T)
        else:
            CC_plus = np.mat([])
            CC_0 = np.mat([])
        Psi_mat = FF
        Gamma_mat = -GG
        Theta_mat = -HH
        Xi_mat = np.mat(
            vstack((hstack(
                (Gamma_mat, Theta_mat)), hstack((eye(nx), zeros((nx, nx)))))))
        Delta_mat = np.mat(
            vstack((hstack((Psi_mat, zeros(
                (nx, nx)))), hstack((zeros((nx, nx)), eye(nx))))))

    else:
        CC_plus = la.pinv(CC)
        CC_0 = _nullSpaceBasis(CC.T)
        if l_equ != ny:
            Psi_mat = vstack((zeros((l_equ - ny, nx)), FF \
                            - dot(dot(JJ, CC_plus), AA)))
            Gamma_mat = vstack((dot(CC_0, AA), dot(dot(JJ, CC_plus), BB) \
                        - GG + dot(dot(KK, CC_plus), AA)))
            Theta_mat = vstack((dot(CC_0, BB), dot(dot(KK, CC_plus), BB) - HH))
        else:
            CC_inv = la.inv(CC)
            Psi_mat = FF - dot(JJ.dot(CC_inv), AA)
            Gamma_mat = dot(JJ.dot(CC_inv), BB) - GG + dot(dot(KK, CC_inv), AA)
            Theta_mat = dot(KK.dot(CC_inv), BB) - HH
        Xi_mat = vstack((hstack((Gamma_mat, Theta_mat)), \
                            hstack((eye(nx), zeros((nx, nx))))))
        Delta_mat = vstack((hstack((Psi_mat, np.mat(zeros((nx, nx))))),\
                                hstack((zeros((nx, nx)), eye(nx)))))

    # Now we need the generalized eigenvalues/vectors for Xi with respect to
    # Delta. That is eVals and eVecs below.

    eVals, eVecs = la.eig(Xi_mat, Delta_mat)
    if npla.matrix_rank(eVecs) < nx:
        print("Error: Xi is not diagonalizable, stopping...")

    # Below we diagonalize Xi, form Lambda/Omega and find P.
    else:
        Xi_sortabs = np.sort(abs(eVals))
        Xi_sortindex = np.argsort(abs(eVals))
        Xi_sortedVec = np.array([eVecs[:, i] for i in Xi_sortindex]).T
        Xi_sortval = eVals[Xi_sortindex]
        Xi_select = np.arange(0, nx)
        if np.imag(Xi_sortval[nx - 1]).any():
            if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                drop_index = 1
                # Re-evaluate both conditions on every pass; computing them
                # once before the loop would never terminate once entered.
                while (drop_index < nx and
                       abs(np.imag(Xi_sortval[drop_index - 1])) > TOL):
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues." +
                          " Quitting...")
                else:
                    print("Droping the lowest real eigenvalue. Beware of" +
                          " sunspots!")
                    Xi_select = np.array([np.arange(0, drop_index - 1),\
                                          np.arange(drop_index, nx + 1)])
        # Here Uhlig computes extra quantities if the user chose "manual
        # roots"; we skip that.
        if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
            print(
                "It looks like we have unstable roots. This might not work...")
        if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
            print("Check the model to make sure you have a unique steady" +
                  " state we are having problems with convergence.")
        Lambda_mat = np.diag(Xi_sortval[Xi_select])
        Omega_mat = Xi_sortedVec[nx:2 * nx, Xi_select]

        if npla.matrix_rank(Omega_mat) < nx:
            print("Omega matrix is not invertible, Can't solve for P; we" +
                  " proceed with QZ-method instead.")

            #~~~~~~~~~ QZ-method codes from SOLVE_QZ ~~~~~~~~#
            Delta_up, Xi_up, UUU, VVV = la.qz(Delta_mat,
                                              Xi_mat,
                                              output='complex')
            UUU = UUU.T
            Xi_eigval = np.diag(
                np.diag(Xi_up) / np.maximum(np.diag(Delta_up), TOL))
            Xi_sortabs = np.sort(abs(np.diag(Xi_eigval)))
            Xi_sortindex = np.argsort(abs(np.diag(Xi_eigval)))
            Xi_sortval = Xi_eigval[Xi_sortindex, Xi_sortindex]
            Xi_select = np.arange(0, nx)
            stake = max(abs(Xi_sortval[Xi_select])) + TOL

            Delta_up, Xi_up, UUU, VVV = qzdiv(stake, Delta_up, Xi_up, UUU, VVV)

            #Check conditions from line 49-109
            if np.imag(Xi_sortval[nx - 1]).any():
                if (abs(Xi_sortval[nx - 1] - sp.conj(Xi_sortval[nx])) < TOL):
                    print(
                        "Problem: You have complex eigenvalues! And this means"
                        +
                        " PP matrix will contain complex numbers by this method."
                    )
                drop_index = 1
                # Re-evaluate both conditions on every pass; computing them
                # once before the loop would never terminate once entered.
                while (drop_index < nx and
                       abs(np.imag(Xi_sortval[drop_index - 1])) > TOL):
                    drop_index += 1
                if drop_index >= nx:
                    print("There is an error. Too many complex eigenvalues." +
                          " Quitting...")
                else:
                    print("Dropping the lowest real eigenvalue. Beware of" +
                          " sunspots!")
                    for i in range(drop_index, nx + 1):
                        Delta_up, Xi_up, UUU, VVV = qzswitch(
                            i, Delta_up, Xi_up, UUU, VVV)
                    Xi_select1 = np.arange(0, drop_index - 1)
                    Xi_select = np.append(Xi_select1,
                                          np.arange(drop_index, nx + 1))

            if Xi_sortval[max(Xi_select)] < 1 - TOL:
                print('There are stable roots NOT used. Proceeding with the' +
                      ' smallest root.')
            if max(abs(Xi_sortval[Xi_select])) > 1 + TOL:
                print(
                    "It looks like we have unstable roots. This might not work..."
                )
            if abs(max(abs(Xi_sortval[Xi_select])) - 1) < TOL:
                print("Check the model to make sure you have a unique steady" +
                      " state we are having problems with convergence.")
            #End of checking conditions
            #Lambda_mat = np.diag(Xi_sortval[Xi_select]) # to help sol_out.m

            VVV = VVV.conj().T
            VVV_2_1 = VVV[nx:2 * nx, 0:nx]
            VVV_2_2 = VVV[nx:2 * nx, nx:2 * nx]
            UUU_2_1 = UUU[nx:2 * nx, 0:nx]
            VVV = VVV.conj().T

            if abs(la.det(UUU_2_1)) < TOL:
                print(
                    "One necessary condition for computing P is NOT satisfied,"
                    + " but we proceed anyways...")
            if abs(la.det(VVV_2_1)) < TOL:
                print(
                    "VVV_2_1 matrix, used to compute for P, is not invertible; we"
                    + " are in trouble but we proceed anyways...")

            PP = np.matrix(la.solve(-VVV_2_1, VVV_2_2))
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print(
                    "A lot of P is complex. We will continue with the" +
                    " real part and hope we don't lose too much information.")
            #~~~~~~~~~ End of QZ-method ~~~~~~~~~#

        #This follows the original uhlig.py file
        else:
            PP = dot(dot(Omega_mat, Lambda_mat), la.inv(Omega_mat))
            PP_imag = np.imag(PP)
            PP = np.real(PP)
            if (sum(sum(abs(PP_imag))) / sum(sum(abs(PP))) > .000001).any():
                print(
                    "A lot of P is complex. We will continue with the" +
                    " real part and hope we don't lose too much information.")
    # The code from here to the end was from the Uhlig file calc_qrs.m.
    # I think for python it fits better here than in a separate file.

    # The if and else below make RR and VV depending on our model's setup.
    if l_equ == 0:
        RR = zeros((0, nx))
        VV = hstack((kron(NN.T, FF) + kron(eye(nz), \
            (dot(FF, PP) + GG)), kron(NN.T, JJ) + kron(eye(nz), KK)))

    else:
        RR = -dot(CC_plus, (dot(AA, PP) + BB))
        VV = sp.vstack((hstack((kron(eye(nz), AA), \
                        kron(eye(nz), CC))), hstack((kron(NN.T, FF) +\
                        kron(eye(nz), dot(FF, PP) + dot(JJ, RR) + GG),\
                        kron(NN.T, JJ) + kron(eye(nz), KK)))))

    # Now we use LL, NN, RR, VV to get the QQ, RR, SS, VV matrices.
    # first try using Sylvester equation solver
    if Sylv:
        if ny > 0:
            PM = (FF - la.solve(JJ.dot(CC), AA))
            if npla.matrix_rank(PM) < nx + ny:
                Sylv = 0
                print("Sylvester equation solver condition is not satisfied;"\
                        +" proceed with the original method...")
        else:
            if npla.matrix_rank(FF) < nx:
                Sylv = 0
                print("Sylvester equation solver condition is not satisfied;"\
                        +" proceed with the original method...")
        print("Using Sylvester equation solver...")
        if ny > 0:
            Anew = la.solve(PM, (FF.dot(PP)+GG+JJ.dot(RR)-\
                    la.solve(KK.dot(CC), AA)) )
            Bnew = NN
            Cnew1 = la.solve(JJ.dot(CC),DD.dot(NN))+la.solve(KK.dot(CC), DD)-\
                    LL.dot(NN)-MM
            Cnew = la.solve(PM, Cnew1)
            QQ = la.solve_sylvester(Anew, Bnew, Cnew)
            SS = la.solve(-CC, (AA.dot(QQ) + DD))
        else:
            Anew = la.solve(FF, (FF.dot(PP) + GG))
            Bnew = NN
            Cnew = la.solve(FF, (-LL.dot(NN) - MM))
            QQ = la.solve_sylvester(Anew, Bnew, Cnew)
            SS = np.zeros((0, nz))  #empty matrix

    # then the Uhlig's way
    else:
        if (npla.matrix_rank(VV) < nz * (nx + ny)):
            print("Sorry but V is not invertible. Can't solve for Q and S;" +
                  " but we proceed anyways...")

        LL = sp.mat(LL)
        NN = sp.mat(NN)
        LLNN_plus_MM = dot(LL, NN) + MM

        if DD.any():
            impvec = vstack([DD, LLNN_plus_MM])
        else:
            impvec = LLNN_plus_MM

        impvec = np.reshape(impvec, ((nx + ny) * nz, 1), 'F')

        QQSS_vec = np.matrix(la.solve(-VV, impvec))

        if (max(abs(QQSS_vec)) == sp.inf).any():
            print("We have issues with Q and S. Entries are undefined." +
                  " Probably because V is no inverible.")

        #Build QQ SS
        QQ = np.reshape(np.matrix(QQSS_vec[0:nx * nz, 0]), (nx, nz), 'F')

        SS = np.reshape(QQSS_vec[(nx * nz):((nx + ny) * nz), 0],\
                            (ny, nz), 'F')

    #Build WW - WW has the property [x(t)',y(t)',z(t)']=WW [x(t)',z(t)'].
    WW = sp.vstack(
        (hstack((eye(nx), zeros((nx, nz)))),
         hstack((dot(RR, la.pinv(PP)), (SS - dot(dot(RR, la.pinv(PP)), QQ)))),
         hstack((zeros((nz, nx)), eye(nz)))))

    # find constant terms
    # redefine matrix to be 2D-array for generating vectors UU and VVV
    AA = np.array(AA)
    CC = np.array(CC)
    FF = np.array(FF)
    GG = np.array(GG)
    JJ = np.array(JJ)
    KK = np.array(KK)
    LL = np.array(LL)
    NN = np.array(NN)
    RR = np.array(RR)
    QQ = np.array(QQ)
    SS = np.array(SS)
    if ny > 0:
        UU1 = -(FF.dot(PP) + GG + JJ.dot(RR) + FF -
                (JJ + KK).dot(la.solve(CC, AA)))
        UU2 = (TT+(FF.dot(QQ)+JJ.dot(SS)+LL).dot(NN.dot(Z0)-Z0)- \
            (JJ+KK).dot(la.solve(CC,WWW)))
        UU = la.solve(UU1, UU2)
        VVV = la.solve(-CC, (WWW + AA.dot(UU)))
    else:
        UU = la.solve(-(FF.dot(PP) + FF + GG),
                      (TT + (FF.dot(QQ) + LL).dot(NN.dot(Z0) - Z0)))
        VVV = np.array([])

    return np.array(PP), np.array(QQ), np.array(UU), np.array(RR), np.array(SS),\
             np.array(VVV)
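
The Sylvester branch above relies on scipy.linalg.solve_sylvester, which solves A X + X B = C for X; a minimal standalone usage check:

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 3))
B = rng.normal(size=(2, 2))
C = rng.normal(size=(3, 2))

X = la.solve_sylvester(A, B, C)             # solves A X + X B = C
assert np.allclose(A.dot(X) + X.dot(B), C)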
Example #48
0
    def __init__(self,
                 bounds,
                 fit,
                 A=None,
                 npop=None,
                 eta_mu=1.0,
                 eta_sigma=None,
                 eta_Bmat=None,
                 adapt_sampling=False,
                 ncores=1,
                 seed=None):

        if seed:
            random.seed(seed)
            np.random.seed(seed)

        self.seed = seed
        patience = 100
        self.fitness_hom = -np.inf

        self.f = fit
        self.eta_mu = eta_mu
        self.use_adasam = adapt_sampling
        self.ncores = ncores
        self.bounds = bounds

        dim = len(bounds)
        A = np.eye(dim) if A is None else A
        sigma = abs(det(A))**(1.0 / dim)
        bmat = A * (1.0 / sigma)
        self.dim = dim
        self.sigma = sigma
        self.bmat = bmat

        # default population size and learning rates
        npop = int(4 + 3 * log(dim)) if npop is None else npop
        eta_sigma = 3 * (3 + log(dim)) * (
            1.0 / (5 * dim * sqrt(dim))) if eta_sigma is None else eta_sigma
        eta_Bmat = 3 * (3 + log(dim)) * (
            1.0 / (5 * dim * sqrt(dim))) if eta_Bmat is None else eta_Bmat
        self.npop = npop
        self.eta_sigma = eta_sigma
        self.eta_bmat = eta_Bmat

        use_fshape = True
        # compute utilities if using fitness shaping
        if use_fshape:
            a = log(1 + 0.5 * npop)
            utilities = array([max(0, a - log(k)) for k in range(1, npop + 1)])
            utilities /= sum(utilities)
            utilities -= 1.0 / npop  # broadcast
            utilities = utilities[::-1]  # ascending order
        else:
            utilities = None
        self.use_fshape = use_fshape
        self.utilities = utilities

        # stuff for adasam
        self.eta_sigma_init = eta_sigma
        self.sigma_old = None

        # logging
        self.fitness_best = -np.inf
        self.mu_best = None
        self.done = False
        self.counter = 0
        self.patience = patience
        self.history = {'eta_sigma': [], 'sigma': [], 'fitness': []}

        # do not use options below when one individual in population is used
        if npop == 1:
            self.use_fshape = False
            self.use_adasam = False
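
The decomposition at the top of this constructor splits A into a scale and a shape: sigma = |det A|^(1/dim) and bmat = A/sigma, so that det(bmat) = +-1. A quick check of that invariant:

import numpy as np

A = np.random.normal(size=(4, 4))
dim = A.shape[0]
sigma = abs(np.linalg.det(A)) ** (1.0 / dim)
bmat = A / sigma                            # shape matrix with |det| = 1
assert np.isclose(abs(np.linalg.det(bmat)), 1.0)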
Example #49
0
sol=root(fun,0.1)
print("ROOT:",sol.x,sol.fun)

#3----Interpolation
x=np.linspace(0,1,10)
y=np.sin(2*np.pi*x)
from scipy.interpolate import interp1d
li=interp1d(x,y,kind="cubic")
x_new=np.linspace(0,1,50)
y_new=li(x_new)
figure()
plot(x,y,"r")
plot(x_new,y_new,"k")
show()
print(y_new)

#4----Linear algebra
from scipy import linalg as lg
arr=np.array([[1,2],[3,4]])
print("Det:",lg.det(arr))
print("Inv:",lg.inv(arr))
b=np.array([6,14])
print("Sol:",lg.solve(arr,b))
print("Eig:",lg.eig(arr))
print("LU:",lg.lu(arr))
print("QR:",lg.qr(arr))
print("SVD:",lg.svd(arr))
print("Schur:",lg.schur(arr))

#others
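
As a sanity check on the factorizations printed above: scipy.linalg.lu returns (P, L, U) with arr = P.L.U, and qr returns (Q, R) with arr = Q.R, so the factors reconstruct the matrix (a sketch reusing arr and lg from above):

P, L, U = lg.lu(arr)        # permutation, unit lower- and upper-triangular
assert np.allclose(P.dot(L).dot(U), arr)
Q, R = lg.qr(arr)
assert np.allclose(Q.dot(R), arr)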
Example #50
0
    def __new__(cls, unitary_matrix, *, fidelity=(1.0 - 1.0e-9)):
        """Perform the Weyl chamber decomposition, and optionally choose a specialized subclass.

        The flip into the Weyl Chamber is described in B. Kraus and J. I. Cirac, Phys. Rev. A 63,
        062309 (2001).

        FIXME: There's a cleaner-seeming method based on choosing branch cuts carefully, in Andrew
        M. Childs, Henry L. Haselgrove, and Michael A. Nielsen, Phys. Rev. A 68, 052311, but I
        wasn't able to get that to work.

        The overall decomposition scheme is taken from Drury and Love, arXiv:0806.4015 [quant-ph].
        """
        pi = np.pi
        pi2 = np.pi / 2
        pi4 = np.pi / 4

        # Make U be in SU(4)
        U = np.array(unitary_matrix, dtype=complex, copy=True)
        detU = la.det(U)
        U *= detU ** (-0.25)
        global_phase = cmath.phase(detU) / 4

        Up = _Bd.dot(U).dot(_B)
        M2 = Up.T.dot(Up)

        # M2 is a symmetric complex matrix. We need to decompose it as M2 = P D P^T where
        # P ∈ SO(4), D is diagonal with unit-magnitude elements.
        # D, P = la.eig(M2)  # this can fail for certain kinds of degeneracy
        state = np.random.default_rng(2020)
        for _ in range(100):  # FIXME: this randomized algorithm is horrendous
            M2real = state.normal() * M2.real + state.normal() * M2.imag
            _, P = np.linalg.eigh(M2real)
            D = P.T.dot(M2).dot(P).diagonal()
            if np.allclose(P.dot(np.diag(D)).dot(P.T), M2, rtol=0, atol=1.0e-13):
                break
        else:
            raise QiskitError("TwoQubitWeylDecomposition: failed to diagonalize M2")

        d = -np.angle(D) / 2
        d[3] = -d[0] - d[1] - d[2]
        cs = np.mod((d[:3] + d[3]) / 2, 2 * np.pi)

        # Reorder the eigenvalues to get in the Weyl chamber
        cstemp = np.mod(cs, pi2)
        np.minimum(cstemp, pi2 - cstemp, cstemp)
        order = np.argsort(cstemp)[[1, 2, 0]]
        cs = cs[order]
        d[:3] = d[order]
        P[:, :3] = P[:, order]

        # Fix the sign of P to be in SO(4)
        if np.real(la.det(P)) < 0:
            P[:, -1] = -P[:, -1]

        # Find K1, K2 so that U = K1.A.K2, with K being product of single-qubit unitaries
        K1 = _B.dot(Up).dot(P).dot(np.diag(np.exp(1j * d))).dot(_Bd)
        K2 = _B.dot(P.T).dot(_Bd)

        K1l, K1r, phase_l = decompose_two_qubit_product_gate(K1)
        K2l, K2r, phase_r = decompose_two_qubit_product_gate(K2)
        global_phase += phase_l + phase_r

        K1l = K1l.copy()

        # Flip into Weyl chamber
        if cs[0] > pi2:
            cs[0] -= 3 * pi2
            K1l = K1l.dot(_ipy)
            K1r = K1r.dot(_ipy)
            global_phase += pi2
        if cs[1] > pi2:
            cs[1] -= 3 * pi2
            K1l = K1l.dot(_ipx)
            K1r = K1r.dot(_ipx)
            global_phase += pi2
        conjs = 0
        if cs[0] > pi4:
            cs[0] = pi2 - cs[0]
            K1l = K1l.dot(_ipy)
            K2r = _ipy.dot(K2r)
            conjs += 1
            global_phase -= pi2
        if cs[1] > pi4:
            cs[1] = pi2 - cs[1]
            K1l = K1l.dot(_ipx)
            K2r = _ipx.dot(K2r)
            conjs += 1
            global_phase += pi2
            if conjs == 1:
                global_phase -= pi
        if cs[2] > pi2:
            cs[2] -= 3 * pi2
            K1l = K1l.dot(_ipz)
            K1r = K1r.dot(_ipz)
            global_phase += pi2
            if conjs == 1:
                global_phase -= pi
        if conjs == 1:
            cs[2] = pi2 - cs[2]
            K1l = K1l.dot(_ipz)
            K2r = _ipz.dot(K2r)
            global_phase += pi2
        if cs[2] > pi4:
            cs[2] -= pi2
            K1l = K1l.dot(_ipz)
            K1r = K1r.dot(_ipz)
            global_phase -= pi2

        a, b, c = cs[1], cs[0], cs[2]

        # Save the non-specialized decomposition for later comparison
        od = super().__new__(TwoQubitWeylDecomposition)
        od.a = a
        od.b = b
        od.c = c
        od.K1l = K1l
        od.K1r = K1r
        od.K2l = K2l
        od.K2r = K2r
        od.global_phase = global_phase
        od.requested_fidelity = fidelity
        od.calculated_fidelity = 1.0
        od.unitary_matrix = np.array(unitary_matrix, dtype=complex, copy=True)
        od.unitary_matrix.setflags(write=False)
        od._original_decomposition = None
        od._is_flipped_from_original = False

        def is_close(ap, bp, cp):
            da, db, dc = a - ap, b - bp, c - cp
            tr = 4 * complex(
                math.cos(da) * math.cos(db) * math.cos(dc),
                math.sin(da) * math.sin(db) * math.sin(dc),
            )
            fid = trace_to_fid(tr)
            return fid >= fidelity

        if fidelity is None:  # Don't specialize if None
            instance = super().__new__(
                TwoQubitWeylGeneral if cls is TwoQubitWeylDecomposition else cls
            )
        elif is_close(0, 0, 0):
            instance = super().__new__(TwoQubitWeylIdEquiv)
        elif is_close(pi4, pi4, pi4) or is_close(pi4, pi4, -pi4):
            instance = super().__new__(TwoQubitWeylSWAPEquiv)
        elif (lambda x: is_close(x, x, x))(_closest_partial_swap(a, b, c)):
            instance = super().__new__(TwoQubitWeylPartialSWAPEquiv)
        elif (lambda x: is_close(x, x, -x))(_closest_partial_swap(a, b, -c)):
            instance = super().__new__(TwoQubitWeylPartialSWAPFlipEquiv)
        elif is_close(a, 0, 0):
            instance = super().__new__(TwoQubitWeylControlledEquiv)
        elif is_close(pi4, pi4, c):
            instance = super().__new__(TwoQubitWeylMirrorControlledEquiv)
        elif is_close((a + b) / 2, (a + b) / 2, c):
            instance = super().__new__(TwoQubitWeylfSimaabEquiv)
        elif is_close(a, (b + c) / 2, (b + c) / 2):
            instance = super().__new__(TwoQubitWeylfSimabbEquiv)
        elif is_close(a, (b - c) / 2, (c - b) / 2):
            instance = super().__new__(TwoQubitWeylfSimabmbEquiv)
        else:
            instance = super().__new__(TwoQubitWeylGeneral)

        instance._original_decomposition = od
        return instance
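
The first step above, U *= det(U)**(-0.25), rescales any 4x4 unitary into SU(4) while recording the removed global phase; a minimal standalone check:

import numpy as np
from scipy import linalg as la

rng = np.random.default_rng(0)
# A random 4x4 unitary via QR of a complex Gaussian matrix.
Q, _ = la.qr(rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)))
U = Q * la.det(Q) ** (-0.25)                # divide out the quartic root of det
assert np.isclose(la.det(U), 1.0)           # special unitary: det = +1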
Example #51
0
)

### conjugate transpose

assert array_equal(
    sp.array([[1j, 2j]]).conjugate().T,
    sp.array([[-1j], [-2j]]),
)

### identity

assert sp.array_equal(sp.eye(2), sp.array([[1., 0.], [0., 1.]]))

### determinant

assert array_equal(la.det(sp.array([[1, 2], [3, 4]])), -2)

### inverse

# **DO NOT USE THIS TO SOLVE LINEAR SYSTEMS**

# use solve() instead, or an explicit LU decomposition.

# (solve likely uses LU under the hood)

# This will be faster and more stable.

# Learn what LU decomposition is now if you don't already know.

A = sp.array([[1, 2], [3, 4]])
assert array_equal(la.inv(A).dot(A), sp.eye(2))
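
For completeness, this is what the advice above looks like in practice: solve for a one-off system, or an explicit LU factorization reused across right-hand sides (a sketch):

import numpy as np
from scipy import linalg as la

A = np.array([[1., 2.], [3., 4.]])
b = np.array([5., 6.])

x = la.solve(A, b)                          # preferred over la.inv(A).dot(b)
assert np.allclose(A.dot(x), b)

lu, piv = la.lu_factor(A)                   # factor once ...
x2 = la.lu_solve((lu, piv), b)              # ... solve many times
assert np.allclose(x, x2)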
Example #52
0
'''
Created on 2018-07-01

@author: Administrator
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from scipy import fftpack as fft
from scipy import io as spio
from scipy import linalg
from scipy import optimize


def f(x):
    return x**2 + 10 * np.sin(x)


if __name__ == '__main__':
    a = np.ones([3, 3])
    spio.savemat('file.mat', {'a': a})
    data = spio.loadmat("file.mat", struct_as_record=True)
    print(data['a'])
    arr = np.array([[1, 2, 3], [3, 4, 6], [5, 7, 8]])
    print('arr:', linalg.det(arr))
    x = np.arange(-10, 10, 0.1)
    print("fmin:", optimize.fmin(f, -5))
    plt.plot(x, f(x))
    plt.show()
    pass
Example #53
0
 def test_simple_complex(self):
     a = [[1, 2], [3, 4j]]
     a_det = det(a)
     assert_almost_equal(a_det, -6 + 4j)
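
The expected value is just the 2x2 formula ad - bc: 1 * 4j - 2 * 3 = -6 + 4j, which is exactly what the assertion checks.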
Example #54
0
# creating a numpy 32x32 array A with normally distributed random numbers g_(i,j)~N(0,1) as its elements
# this means a normal distribution with mu = 0 and standard deviation of 1.0
# if you want values only between 0 and 1 then use np.random.random((size, size))
A = np.random.normal(0, 1, (32, 32))
print(A)

# calculating the inverse B= A^(-1)
B = lalg.inv(A)
print(B)

# transpose matrix
C = A.T
#print(C)

# find the determinant
determinant_A = lalg.det(A)
#print(determinant_A)

# The element-wise (Hadamard) product of A and B (np.multiply is not the matrix product)
D = np.multiply(A, B)
print(D)

# The matrix product A.B (np.dot) and the scalar product of the flattened arrays (np.vdot)
E = np.dot(A, B)
#print(E)

F = np.vdot(A, B)
#print(F)

# calculating eigenvalues and eigenvectors using scipy module .eig
eigenvalues, eigenvectors = lalg.eig(A)
Example #55
0
xmax, ymax, zmax = 1., 1., 1.  # Physical domain that density estimation is performed on.
xdim, ydim, zdim = 65, 65, 65  # Grid resolution of the physical domain.

H = np.zeros((3, 3))  # bandwidth matrix
H[0, 0], H[0, 1], H[0, 2] = .0025, 0., 0.
H[1, 0], H[1, 1], H[1, 2] = 0., .0025, 0.
H[2, 0], H[2, 1], H[2, 2] = 0., 0., .0025
# ------------------------------------------------------------------------------

# SDE computation --------------------------------------------------------------
a, b, c = verts[0], verts[1], verts[2]  # three vertices of the triangle
area = TriangleArea(a, b, c)  # area of the triangle

Hi = linalg.inv(H)  # inverse of the bandwidth matrix
Hi_sqrt = linalg.sqrtm(Hi)  # square root of Hi (i.e., Hi_sqrt x Hi_sqrt = Hi)
Hi_sqrt_det = float(linalg.det(Hi_sqrt))  # determinant of Hi_sqrt
Hi_sqrt = Hi_sqrt.flatten().tolist()

# Create a numpy array to store SDE.
sde = np.zeros([zdim, ydim, xdim])

# Compute SDE for each grid point.
for i in tqdm(range(xdim * ydim * zdim)):
    # physical position of the grid point
    gk = math.floor(i / (xdim * ydim))
    gj = math.floor((i - gk * xdim * ydim) / xdim)
    gi = i % xdim

    x = [
        gi * (xmax - xmin) / (xdim - 1.) + xmin,
        gj * (ymax - ymin) / (ydim - 1.) + ymin,
Example #56
0
def var_bound(data, modelState, queryState):
    '''
    Determines the variational bounds. Values are mutated in place, but are
    reset afterwards to their initial values. So it's safe to call in a serial
    manner.
    '''
    
    # Unpack the structs, for ease of access and efficiency
    W   = data.words
    D,_ = W.shape
    means, expMeans, varcs, docLens = queryState.means, queryState.expMeans, queryState.varcs, queryState.docLens
    K, topicMean, sigT, vocab, vocabPrior, A = modelState.K, modelState.topicMean, modelState.sigT, modelState.vocab, modelState.vocabPrior, modelState.A
    
    # Calculate some implicit  variables
    isigT = la.inv(sigT)
    
    bound = 0
    
    if USE_NIW_PRIOR:
        pseudoObsMeans = K + NIW_PSEUDO_OBS_MEAN
        pseudoObsVar   = K + NIW_PSEUDO_OBS_VAR

        # distribution over topic covariance
        bound -= 0.5 * K * pseudoObsVar * log(NIW_PSI)
        bound -= 0.5 * K * pseudoObsVar * log(2)
        bound -= fns.multigammaln(pseudoObsVar / 2., K)
        bound -= 0.5 * (pseudoObsVar + K - 1) * safe_log_det(sigT)
        bound += 0.5 * NIW_PSI * np.trace(isigT)

        # and its entropy
        # is a constant which we skip
        
        # distribution over means
        bound -= 0.5 * K * log(1./pseudoObsMeans) * safe_log_det(sigT)
        bound -= 0.5 / pseudoObsMeans * (topicMean).T.dot(isigT).dot(topicMean)
        
        # and its entropy
        bound += 0.5 * safe_log_det(sigT) # +  a constant
        
    
    # Distribution over document topics
    bound -= (D*K)/2. * LN_OF_2_PI
    bound -= D/2. * safe_log_det(sigT)  # Gaussian normalizer needs ln|Sigma|, not det(Sigma)
    diff   = means - topicMean[np.newaxis,:]
    bound -= 0.5 * np.sum (diff.dot(isigT) * diff)
    bound -= 0.5 * np.sum(varcs * np.diag(isigT)[np.newaxis,:]) # = -0.5 * sum_d tr(V_d \Sigma^{-1}) when V_d is diagonal only.
       
    # And its entropy
#     bound += 0.5 * D * K * LN_OF_2_PI_E + 0.5 * np.sum(np.log(varcs)) 
    
    # Distribution over word-topic assignments and words and the formers
    # entropy. This is somewhat jumbled to avoid repeatedly taking the
    # exp and log of the means
    expMeans = np.exp(means - means.max(axis=1)[:,np.newaxis], out=expMeans)
    R = sparseScalarQuotientOfDot(W, expMeans, vocab)  # D x V   [W / TB] is the quotient of the original over the reconstructed doc-term matrix
    V = expMeans * (R.dot(vocab.T)) # D x K
    
    bound += np.sum(docLens * np.log(np.sum(expMeans, axis=1)))
    bound += np.sum(sparseScalarProductOfSafeLnDot(W, expMeans, vocab).data)
    
    bound += np.sum(means * V)
    bound += np.sum(2 * ssp.diags(docLens,0) * means.dot(A) * means)
    bound -= 2. * scaledSelfSoftDot(means, docLens)
    bound -= 0.5 * np.sum(docLens[:,np.newaxis] * V * (np.diag(A))[np.newaxis,:])
    
    bound -= np.sum(means * V) 
    
    
    return bound
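
The safe_log_det helper used above is not shown here; a numerically stable log-determinant is usually computed via numpy.linalg.slogdet rather than log(det(...)), whose det can overflow for large matrices. A sketch of such a helper (the name mirrors the one above; the implementation is my assumption):

import numpy as np

def safe_log_det_sketch(S):
    # Stable ln|det S| via slogdet; det(S) itself may overflow a float64.
    sign, logdet = np.linalg.slogdet(S)
    if sign <= 0:
        raise ValueError("matrix is not positive definite")
    return logdet

S = np.eye(1200) * 2.0                      # det = 2**1200 overflows float64
assert np.isclose(safe_log_det_sketch(S), 1200 * np.log(2.0))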
Example #57
0
# -*- coding: utf-8 -*-
from numpy import *
from scipy import linalg as LA
from matplotlib.pyplot import *
import matplotlib.cm as cm

# Visualizing a bivariate Gaussian distribution
rho = 0.5
mu = array([0, 0])  # mean
S = array([[1, rho], [rho, 1]])  # covariance
Sinv = LA.inv(S)
detS = LA.det(S)


def f(x):
    return exp(-(x - mu).T.dot(Sinv).dot(x - mu) / 2) / (2 * pi * sqrt(detS))


X, Y = meshgrid(linspace(-3, 3, 100), linspace(-3, 3, 100))
Z = vectorize(lambda x, y: f([x, y]))(X, Y)

xlim(-3, 3)
ylim(-3, 3)
pcolor(X, Y, Z, alpha=0.3)
show()


# Gibbs sampling
def next(x):
    new_x = random.normal(rho * x[1], sqrt(1 - rho**2))  # scale is the std dev, not the variance
    new_y = random.normal(rho * new_x, sqrt(1 - rho**2))
Example #58
0
	cursor.execute("SELECT VERSION()")
	data = cursor.fetchone()
	print(data)
	db.close()
	
	import numpy as np
	from scipy import linalg
	A = np.array([[1,2],[3,4]])
	print(A)
	print(linalg.inv(A))
	
	b=np.array([[5],[6]])
	print(b.T)
	print(A*b)
	print(np.linalg.solve(A,b))
	print(linalg.det(A))
	
	import numpy as np
	from scipy import linalg
	import matplotlib.pyplot as plt
	
	c1, c2 = 5.0, 2.0
	i = np.r_[1:11]
	xi = 0.1*i
	yi = c1*np.exp(-xi) + c2*xi
	zi = yi + 0.05 * np.max(yi) * np.random.randn(len(yi))
	A = np.c_[np.exp(-xi)[:, np.newaxis], xi[:, np.newaxis]]
	print(linalg.lstsq(A, zi))
	
	import numpy as np
	from scipy import linalg
Example #59
0
 def time_det(self, size, contig, module):
     if module == 'numpy':
         nl.det(self.a)
     else:
         sl.det(self.a)
Example #60
0
print(np.matmul(A,resMat))
print("Least squares solution")
print(np.matmul(A,resMatLstSq))

#f) Solve the eigenvalue problem for A
eigenvals, eigenvecs = la.eig(A)

print("Eigenvalues: ", eigenvals)
print("Eigenvectors: ", eigenvecs)
#print(np.matmul(A,eigenvecs[:,1]))
#print(np.dot(eigenvals[1],eigenvecs[:,1]))

print("Difference between A*eigvec and eigval*eigvec: ", la.norm(np.matmul(A,eigenvecs[:,1])-np.dot(eigenvals[1],eigenvecs[:,1])))

#g) Calculate the inverse, determinant of A
invA = la.inv(A)
print("Inverse of A: ", invA)

detA = la.det(A)
print("Determinant of A: ", detA) 

#Calculate norm of A with different orders
norm2A = la.norm(A, ord=2)
print("Matrix 2-norm of A: ", norm2A)

normMaxColsA = la.norm(A, ord=1)
print("Max of absolute column sums of A: ", normMaxColsA)

normMaxRowsA = la.norm(A, ord=np.inf)
print("Max of absolute row sums of A: ", normMaxRowsA)