def compute_gx(mu_1, mu_2, cov_1, cov_2, w1, w2, x):
    """Two-class Gaussian log-odds discriminant g(x) = g1(x) - g2(x) with priors w1, w2."""
    const = 0.5 * math.log(linalg.det(cov_2) / linalg.det(cov_1)) + math.log(float(w1) / w2)
    gx = 0.5 * (x - mu_2) * cov_2.I * (x - mu_2).T - 0.5 * (x - mu_1) * cov_1.I * (x - mu_1).T
    v = gx.tolist()
    return v[0][0] + const
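A minimal usage sketch (my addition, not from the original source): the function leans on the np.matrix API (.I, .T, .tolist()), so every argument must be an np.matrix, with the means and the query point as row vectors.

# Hypothetical usage; assumes: import math; from numpy import linalg, matrix
mu_1 = matrix([[0.0, 0.0]])
mu_2 = matrix([[1.0, 1.0]])
cov_1 = matrix([[1.0, 0.0], [0.0, 1.0]])
cov_2 = matrix([[2.0, 0.0], [0.0, 2.0]])
x = matrix([[0.5, 0.5]])
# g(x) > 0 favors class 1, g(x) < 0 favors class 2, given priors w1 and w2
print(compute_gx(mu_1, mu_2, cov_1, cov_2, 0.5, 0.5, x))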

Example #2
def findFlip(t1, t2):  # Each arg is an array of coordinates
    """Check whether the two triangles have opposite orientation.

    @param t1: 2x3 matrix consisting of coordinates of triangle in
    frame1
    @param t2: 2x3 matrix consisting of coordinates of triangle in
    frame2

    @returns: True if the frames are flipped
    """
    # Append a row of ones so each determinant gives the triangle's signed area (x2)
    det1 = l.det(concatenate((t1, ones((1, 3)))))
    det2 = l.det(concatenate((t2, ones((1, 3)))))
    return det1 / det2 < 0
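A quick sanity check (my addition): the snippet's bare names suggest star imports from numpy with l bound to numpy.linalg, which is what this sketch assumes.

# Hypothetical check; assumes: from numpy import array, concatenate, ones
#                              import numpy.linalg as l
t1 = array([[0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0]])   # columns are the triangle's vertices
t2 = t1[:, [1, 0, 2]]           # swapping two vertices reverses orientation
print(findFlip(t1, t1))         # False: orientations agree
print(findFlip(t1, t2))         # True: the signed areas have opposite signs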
Example #3
def _build_local_f(element_points, S_e):
    # 2x2 Gauss-Legendre quadrature points on the reference square [-1, 1]^2
    integration_point = np.array([
        [-1.0 / np.sqrt(3.0), -1.0 / np.sqrt(3.0)],
        [+1.0 / np.sqrt(3.0), -1.0 / np.sqrt(3.0)],
        [+1.0 / np.sqrt(3.0), +1.0 / np.sqrt(3.0)],
        [-1.0 / np.sqrt(3.0), +1.0 / np.sqrt(3.0)],
    ])

    w = np.array([1.0, 1.0, 1.0, 1.0])

    N = np.zeros(shape=(1, 4))
    f = np.zeros(shape=(4, 1))
    for i in range(4):
        r, s = integration_point[i]

        # bilinear (Q4) shape functions evaluated at the Gauss point (r, s)
        N[0, 0] = (1.0 - r) * (1.0 - s) / 4.0
        N[0, 1] = (1.0 + r) * (1.0 - s) / 4.0
        N[0, 2] = (1.0 + r) * (1.0 + s) / 4.0
        N[0, 3] = (1.0 - r) * (1.0 + s) / 4.0

        grad_N = np.array([
            [-(1.0 - s) / 4.0, -(1.0 - r) / 4.0],
            [+(1.0 - s) / 4.0, -(1.0 + r) / 4.0],
            [+(1.0 + s) / 4.0, +(1.0 + r) / 4.0],
            [-(1.0 + s) / 4.0, +(1.0 - r) / 4.0],
        ])
        # Jacobian of the isoparametric map and its determinant
        J = np.dot(grad_N.T, element_points)
        dJ = det(J)

        f += w[i] * N.T * S_e * dJ

    return f
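A quick quadrature check (my addition, assuming numpy is imported as np and det comes from numpy.linalg): for a unit-square element and a unit source S_e, the consistent load vector should spread the element's area evenly, 0.25 per node.

# Hypothetical sanity check for _build_local_f
unit_square = np.array([[0.0, 0.0],
                        [1.0, 0.0],
                        [1.0, 1.0],
                        [0.0, 1.0]])   # nodes in counter-clockwise order
f = _build_local_f(unit_square, S_e=1.0)
print(f)          # each entry ~0.25
print(f.sum())    # ~1.0, the integral of the unit source over the element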
Example #4
def lwlr(testPoint, xArr, yArr, k=1.0):
    '''
    Locally weighted linear regression.
    Coefficients: w = (X^T W X)^(-1) X^T W y
    Gaussian kernel: W(i, i) = exp(-||x^(i) - x||^2 / (2 * k^2))
    :param testPoint: query point
    :param xArr: input values
    :param yArr: target values
    :param k: Gaussian kernel bandwidth, user-defined
    :return: prediction at testPoint
    '''
    xMat = mat(xArr)
    yMat = mat(yArr).T
    m = shape(xMat)[0]
    weights = mat(eye(m))  # initialize the weight matrix

    for j in range(m):
        diffMat = testPoint - xMat[j, :]
        weights[j, j] = exp(diffMat * diffMat.T / (-2.0 * k ** 2))  # Gaussian kernel

    xTx = xMat.T * (weights * xMat)

    if linalg.det(xTx) == 0.0:  # check that the matrix is invertible
        print("This matrix is singular, cannot do inverse")
        return

    ws = xTx.I * (xMat.T * (weights * yMat))

    return testPoint * ws
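A usage sketch (my addition, assuming the star-style numpy imports the snippet relies on: mat, shape, eye, exp, linalg):

# Hypothetical data: first column is the constant/bias term
xArr = [[1.0, 0.5], [1.0, 1.0], [1.0, 1.5], [1.0, 2.0]]
yArr = [1.1, 1.9, 3.2, 3.9]
# prediction at x = 1.2; smaller k weights nearby points more heavily
print(lwlr([1.0, 1.2], xArr, yArr, k=0.5))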
Example #5
 def readSamples(self, fileName, key, recalc=False, samples=None):
     fn = fileName + ".pre"
     try:
         if recalc: raise IOError()
         with open(fn): pass
         print("precalculated file present")
         self.mu, self.cov = hsplit(mat(fromfile(fn).reshape((3, -1))), [1])
     except IOError:
         if samples is not None:
             self._samples = samples
             print("got samples: ", self._samples)
         else:
             print("no file present, calculating...")
             smpls = loadmat(fileName)[key]
             print("loaded from mat file")
             self._samples = mat(smpls)
             print("reshaped into samples")
         self.mu = sum(self._samples, axis=1) / self._samples.shape[1]
         print("mu=", str(self.mu))
         sampdiffmu = self._samples - self.mu
         self.cov = sampdiffmu * sampdiffmu.T / self._samples.shape[1]
         print("cov=", str(self.cov))
         mat(hstack((self.mu, self.cov))).tofile(fn)
     self._invCov = self.cov.I
     self._detCov = det(self.cov)
     self._multConst = 1 / sqrt((2 * pi) ** 3 * self._detCov)
Example #6
 def getRelEnt(self, m1, m2, d):
     """
     Estimates relative entropy D(dist2||dist1) from moments of dist1 and
     dist2 in nats.
             
     :param m1: moments instance for dist1  
     :param m2: moments instance for dist2
     :param d: dimensionality
     :return Dpart, deltamu: Covariance-dependent part and mean dependent
     part of the relative entropy
     """
     Dpart = -log(det(m2.cov) / det(m1.cov))
     Dpart += trace(m2.cov * m1.icov) - d
     diff = m1.mean - m2.mean
     deltamu = (diff * m1.icov * diff.T).A1[0]
     return Dpart, deltamu
Example #8
def print_matrix_with_meta_data(filename, matrix):
    np.save(os.path.join(DATA_DIR, filename), matrix)
    details_file = open(os.path.join(DATA_DIR, filename + '.txt'), 'w')
    try:
        details_file.write(np.array2string(matrix) + "\n")
        details_file.write("\ndeterminant:                " +
                           str(linalg.det(matrix)) + "\n")
        details_file.write("inverse matrix determinant: " +
                           str(linalg.det(linalg.inv(matrix))) + "\n")
        details_file.write("euclidean norm:             " +
                           str(linalg.norm(matrix, 'fro')) + "\n")
        details_file.write("euclidean norm\nof inverse matrix:          " +
                           str(linalg.norm(linalg.inv(matrix), 'fro')) + "\n")
        details_file.write("condition number:           " +
                           str(linalg.cond(matrix, 'fro')))
    finally:
        details_file.close()
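A hypothetical call (my addition; assumes numpy as np, numpy's linalg, os, and an existing DATA_DIR directory):

m = np.array([[4.0, 1.0],
              [2.0, 3.0]])
print_matrix_with_meta_data("test_matrix", m)  # writes test_matrix.npy and test_matrix.txt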
Example #9
def Invcopt(W, vk, XDim, K):
    invc = [None for _ in range(K)]
    for k in range(K):
        dW = det(W[k])
        print('dW', dW)
        # guard against taking the log of a numerically zero determinant
        if dW > 1e-30: ld = log(dW)
        else: ld = 0.0
        invc[k] = sum([digamma((vk[k] + 1 - i) / 2.) for i in range(XDim)]) + XDim * log(2) + ld
    return invc
Example #10
def computeFundamentalMatrix2(ref, img):
    # Build the fundamental matrix entry-by-entry from 4x4 determinants of
    # stacked row pairs of the two 3x4 projection matrices (ref.pmat, img.pmat)
    pmat1 = ref.pmat
    pmat2 = img.pmat
    p00 = pmat1[0]
    p01 = pmat1[1]
    p02 = pmat1[2]
    p10 = pmat2[0]
    p11 = pmat2[1]
    p12 = pmat2[2]

    fmat = np.array([
        [det((p01, p02, p11, p12)), det((p01, p02, p12, p10)), det((p01, p02, p10, p11))],
        [det((p02, p00, p11, p12)), det((p02, p00, p12, p10)), det((p02, p00, p10, p11))],
        [det((p00, p01, p11, p12)), det((p00, p01, p12, p10)), det((p00, p01, p10, p11))]
    ])

    return -fmat
Example #11
    def complementary(self, m1, m2, d):
        """
        Return expected relative entropy, surprise and standard deviation
        for dist1 being the prior of dist2.

        :param m1: moments instance for dist1  
        :param m2: moments instance for dist2
        :param d: dimensionality
        :returns D, ere, S, sigmaD, lambdas, deltamu: Relative entropy,
        expected relative entropy, surprise, sigma(D), lambdas and dmu of 
        generalized chi-squared
        """
        Dpart, deltamu = self.getRelEnt(m1, m2, d)
        ASigma = matrix(identity(d) - m1.icov * m2.cov)
        lambdas = eig(ASigma)[0]
        ere = -.5 * log(det(m2.cov) / det(m1.cov))
        D = .5 * (Dpart + deltamu)
        S = D - ere
        sigmaD = trace(ASigma * ASigma)
        sigmaD = sqrt(.5 * sigmaD)
        return D, ere, S, sigmaD, lambdas, deltamu
Example #13
 def pdf(self):
    """ Partially applied Gaussian pdf """
    dim     = self.mean.shape[0]
    const   = 1 / (((2*numpy.pi)**(dim/2.0)) * (det(self.cov)**0.5))
    inv_cov = inv(self.cov)
    def gauss_pdf(x):
       sub = x - self.mean
       exponent = -0.5* sub.T * inv_cov * sub
       if (numpy.shape(exponent) != (1,1)):
          raise AssertionError
       return const * (numpy.e ** exponent[0,0])
    return gauss_pdf  
Example #14
def compute_gix(mu, cov, wi, x):
    dim = len(x)
    x = matrix(x)
    mu = matrix(mu)
    cov = matrix(cov)
    part1 = math.log(wi)
    # log-normalization constant: log( 1 / ((2*pi)^(dim/2) * sqrt(det(cov))) )
    part2 = -0.5 * (dim * math.log(2 * math.pi) + math.log(linalg.det(cov)))
    part3 = -0.5 * (x - mu) * cov.I * (x - mu).T
    return part1 + part2 + part3.tolist()[0][0]
Example #15
 def optimize_vector(self, i_vec):
     """
     Optimizes torsion and bond angles by rotation around
     one atom i.
     1. moves the coordinate origin to that atom.
     2. generates a rotation matrix from a singular value decomposition.
     3. rotates all atoms after i.
     """
     center = self.moving[i_vec]
     moving_coords = self.get_moving_coords(center)
     fixed_coords = self.get_fixed_coords(center)
     # Do singular value decomposition
     a = dot(fixed_coords, transpose(moving_coords))
     u, d, vt = linalg.svd(a)
     # Check reflection: a negative det(u)*det(vt) means the SVD produced a
     # reflection, which S (presumably diag(1, 1, -1)) corrects to a rotation
     if (linalg.det(u) * linalg.det(vt)) < 0:
         u = dot(u, S)
     # Calculate rotation
     rot_matrix = dot(u, vt)
     # Apply rotation
     if self.accept_rotation():
         self.apply_rotation(rot_matrix, i_vec, center)
Example #17
    def pdf(self):
        """ Partially applied Gaussian pdf """
        dim = self.mean.shape[0]
        const = 1 / (((2 * numpy.pi)**(dim / 2.0)) * (det(self.cov)**0.5))
        inv_cov = inv(self.cov)

        def gauss_pdf(x):
            sub = x - self.mean
            exponent = -0.5 * sub.T * inv_cov * sub
            if (numpy.shape(exponent) != (1, 1)):
                raise AssertionError
            return const * (numpy.e**exponent[0, 0])

        return gauss_pdf
Example #18
 def pdf_mat(self):
    """ Return a partially applied Gaussian pdf that takes in a matrix whose columns are the input vectors"""
    dim     = self.mean.shape[0]
    const   = 1 / (((2*numpy.pi)**(dim/2.0)) * (det(self.cov)**0.5))
    inv_cov = inv(self.cov)
    def gauss_pdf_mat(x):
       """Partially applied Gaussian pdf that takes in a matrix whose columns are the input vectors"""
       sub = x - self.mean
       r0 = inv_cov * sub
       exponent = -0.5 * numpy.sum(sub.A * r0.A, axis=0)
       if (numpy.shape(exponent) != (x.shape[1],)):
          raise AssertionError("exponent has the wrong shape, should be (%d,), but is (%d,)" % (x.shape[1], exponent.shape[0]))
       g = const * (numpy.e ** exponent)
       return g
    return gauss_pdf_mat
Example #19
def gaussian_mvn_pdf(X, mean, cov):
    '''
    Evaluate the multivariate Gaussian density with the given mean and covariance,
    e.g. as the class-conditional likelihood in a Bayes classifier.
    Params:
        X: Data to be classified (Dx1)
        mean: Mean vector of the data (Dx1)
        cov: Covariance matrix of the data (DxD)
    Returns:
        p: probability density of X under the Gaussian
    '''
    D = det(cov)
    inv_cov = inv(cov)
    X_shift = X - mean
    p_1 = 1 / (((2 * np.pi)**(len(mean) / 2)) * (D**(1 / 2)))
    p_2 = (-1 / 2) * ((X_shift.T) @ (inv_cov) @ (X_shift))
    p = p_1 * np.exp(p_2)
    return p
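A quick check (my addition, assuming numpy as np with det and inv from numpy.linalg): the peak density of a standard 2-D Gaussian is 1/(2*pi) ≈ 0.1592.

# Hypothetical check of gaussian_mvn_pdf
mean = np.zeros((2, 1))
cov = np.eye(2)
x = np.zeros((2, 1))
print(gaussian_mvn_pdf(x, mean, cov))  # ~0.15915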
Example #20
def generativeSeperateCov(trainData, trainLabels, testData, testLabels):
    (numClasses, N, mu, Slist, pList) = estimateGaussian(trainData, trainLabels)
    
    numCorrect = 0
    for x, t in zip(testData, testLabels):
        pXgivenClassList = []
        for i in range(numClasses):
            # unnormalized Gaussian density; the (2*pi)^(d/2) factor cancels in the class ratio below
            pXgivenClassList.append(1 / sqrt(det(Slist[i])) * exp(-0.5 * dot(dot((x - mu[i]), inv(Slist[i])), (x - mu[i]))))
        a = log((pXgivenClassList[1]*pList[1]) / (pXgivenClassList[0]*pList[0]))
        probClass1 = sigmoid(a)
        if probClass1 >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
Example #21
    def probs(x, u, sigma):
        """
        Evaluate the multivariate Gaussian density with mean u and covariance sigma at x.
        :param x: random variables; each row is one feature vector
        :param u: mean vector
        :param sigma: covariance matrix
        :return: densities, one per row of x
        """
        dx = x - u
        dx_div_sigma = dx @ la.inv(sigma)
        p = np.zeros((x.shape[0], ), dtype=dx.dtype)
        for i in range(len(p)):
            p[i] = np.dot(dx_div_sigma[i], dx[i])
        p *= -0.5

        n = sigma.shape[0]
        d = 1.0 / np.sqrt((2 * np.pi)**n * la.det(sigma))
        return np.exp(p) * d
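A quick check (my addition, assuming numpy as np, numpy.linalg as la, and that the method is reachable as a plain function or staticmethod): the standard bivariate Gaussian has density 1/(2*pi) at the origin.

# Hypothetical check of probs
x = np.zeros((1, 2))                       # one sample per row
print(probs(x, np.zeros(2), np.eye(2)))    # ~[0.15915]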
Example #22
def norm_pdf_multivariate(x, mu, sigma):
    # note: uses the standard-library math module (np.math was removed from NumPy)
    print(x)
    print(mu)
    print(sigma)
    size = len(x)
    if size == len(mu) and (size, size) == sigma.shape:
        det = linalg.det(sigma)
        if det == 0:
            raise NameError("The covariance matrix can't be singular")
        # normalization constant: 1 / ((2*pi)^(d/2) * sqrt(det(sigma)))
        norm_const = 1.0 / (math.pow(2 * np.pi, float(size) / 2) * math.sqrt(det))
        x_mu = np.matrix(x - mu)
        inv = sigma.I
        result = math.exp(-0.5 * float(x_mu * inv * x_mu.T))
        return norm_const * result
    else:
        raise NameError("The dimensions of the input don't match")
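A quick check (my addition; assumes numpy as np, numpy.linalg as linalg, and import math per the note above). sigma must be an np.matrix because the function uses its .I attribute.

# Hypothetical check of norm_pdf_multivariate
x = np.array([0.0, 0.0])
mu = np.array([0.0, 0.0])
sigma = np.matrix(np.eye(2))
print(norm_pdf_multivariate(x, mu, sigma))  # ~0.15915 = 1/(2*pi)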
Example #23
def _build_local_Ke(element_points, omega, mu, eta):
    integration_point = np.array([
        [-1.0 / np.sqrt(3.0), -1.0 / np.sqrt(3.0)],
        [+1.0 / np.sqrt(3.0), -1.0 / np.sqrt(3.0)],
        [+1.0 / np.sqrt(3.0), +1.0 / np.sqrt(3.0)],
        [-1.0 / np.sqrt(3.0), +1.0 / np.sqrt(3.0)],
    ])

    w = np.array([1.0, 1.0, 1.0, 1.0])

    N = np.zeros(shape=(1, 4))
    M_hat = np.zeros(shape=(4, 4))
    C_hat = np.zeros(shape=(4, 4), dtype=complex)  # np.complex was removed in NumPy 1.24+
    K_hat = np.zeros(shape=(4, 4))
    for i in range(4):
        r, s = integration_point[i]

        N[0, 0] = (1.0 - r) * (1.0 - s) / 4.0
        N[0, 1] = (1.0 + r) * (1.0 - s) / 4.0
        N[0, 2] = (1.0 + r) * (1.0 + s) / 4.0
        N[0, 3] = (1.0 - r) * (1.0 + s) / 4.0

        grad_N = np.array([
            [-(1.0 - s) / 4.0, -(1.0 - r) / 4.0],
            [+(1.0 - s) / 4.0, -(1.0 + r) / 4.0],
            [+(1.0 + s) / 4.0, +(1.0 + r) / 4.0],
            [-(1.0 + s) / 4.0, +(1.0 - r) / 4.0],
        ])

        J = np.dot(grad_N.T, element_points)
        dJ = det(J)

        inv_J = np.array([
            [J[1, 1], -J[0, 1]],
            [-J[1, 0], J[0, 0]],
        ])
        B = (1.0 / dJ) * np.dot(inv_J, grad_N.T)

        M_hat += w[i] * -(omega**2.0) * mu * np.dot(N.T, N) * dJ
        C_hat += w[i] * 1j * omega * eta * np.dot(N.T, N) * dJ
        K_hat += w[i] * np.dot(B.T, B) * dJ
    K = M_hat + C_hat + K_hat
    return K
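A usage sketch (my addition, under the same assumptions as _build_local_f above: numpy as np, det from numpy.linalg):

# Hypothetical call on a unit-square element with unit material constants
unit_square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
Ke = _build_local_Ke(unit_square, omega=1.0, mu=1.0, eta=1.0)
print(Ke.shape)   # (4, 4)
print(Ke.dtype)   # complex128, because of the 1j*omega*eta damping term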
Example #24
def standRegress(data_matrix, label_matrix):
    """
    Standard (ordinary least squares) regression.
    :param data_matrix: input features, one sample per row
    :param label_matrix: target values
    :return: coefficient matrix w, or None if X^T X is singular
    """
    # xTx = X^T * X; its determinant tells us whether the normal equations are solvable
    xTx = data_matrix.T * data_matrix
    # check the determinant
    if linalg.det(xTx) == 0:
        print("This matrix is not invertible")
        return

    # If invertible, solve the normal equations directly: w = (X^T X)^(-1) X^T y
    w = xTx.I * data_matrix.T * label_matrix
    print("W matrix:", w)
    return w
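A usage sketch (my addition; the inputs must be np.matrix objects since the function uses .T and .I):

# Hypothetical data lying exactly on y = 1 + x
X = np.mat([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])   # first column: bias
y = np.mat([[1.0], [2.0], [3.0]])
print(standRegress(X, y))   # ~[[1.0], [1.0]]: intercept 1, slope 1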
Example #25
    def pdf_mat(self):
        """ Return a partially applied Gaussian pdf that takes in a matrix whose columns are the input vectors"""
        dim = self.mean.shape[0]
        const = 1 / (((2 * numpy.pi)**(dim / 2.0)) * (det(self.cov)**0.5))
        inv_cov = inv(self.cov)

        def gauss_pdf_mat(x):
            """Partially applied Gaussian pdf that takes in a matrix whose columns are the input vectors"""
            sub = x - self.mean
            r0 = inv_cov * sub
            exponent = -0.5 * numpy.sum(sub.A * r0.A, axis=0)
            if (numpy.shape(exponent) != (x.shape[1], )):
                raise AssertionError(
                    "exponent has the wrong shape, should be (%d,), but is (%d,)"
                    % (x.shape[1], exponent.shape[0]))
            g = const * (numpy.e**exponent)
            return g

        return gauss_pdf_mat
Example #26
def ridgeRegres(xMat, yMat, lam=0.2):
    '''
    Ridge regression: handles more features than samples by adding lam*I
    so that the matrix is non-singular.
    ws = (X^T X + lam*I)^(-1) * X^T * y
    :param xMat: input values
    :param yMat: target values
    :param lam: regularization parameter lambda
    :return: regression coefficients ws
    '''
    xTx = xMat.T * xMat
    demo = xTx + eye(shape(xMat)[1]) * lam

    if linalg.det(demo) == 0.0:  # check invertibility
        print("This matrix is singular, cannot do inverse")
        return

    ws = demo.I * (xMat.T * yMat)

    return ws
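A usage sketch (my addition, assuming the snippet's star imports: mat, eye, shape, linalg). With nearly collinear columns, plain least squares is ill-conditioned, but the lam*I term keeps xTx + lam*I invertible.

# Hypothetical ill-conditioned design matrix
xMat = mat([[1.0, 2.0], [2.0, 4.1], [3.0, 6.2]])
yMat = mat([[1.0], [2.0], [3.0]])
print(ridgeRegres(xMat, yMat, lam=0.1))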
Example #27
    def platitude(self):
        '''
        WARNING: expensive!
        Computes a flatness coefficient for the polyline
        (platitude = 0.0 if the points are coplanar).
        We iterate over all tetrahedra T(i,j,k) = A,Xi,Xj,Xk, where A = X[0].
        We compute the volume Vt(i,j,k) of T(i,j,k).
        We compute the circumscribed sphere S(i,j,k) and its volume Vs(i,j,k).

        If T is large (4 edges of the same length) then Vs = 3.pi.sqrt(3).Vt.
        T is 'flat' if 3.pi.sqrt(3).Vt/Vs is small compared to 1.
        We average the coefficients a(i,j,k) = 3.pi.sqrt(3).Vt(i,j,k)/Vs(i,j,k).
        '''
        self.nbappelsplatitude += 1
        if self.nbappelsplatitude < 10:
            stack('nbappelsplatitude=%d' % self.nbappelsplatitude)
        n = len(self.points)
        X = self.points
        if n <= 3: return 0.0
        A = X[0]
        moy = 0.0
        nt = 0
        cs, cv = 4.0 * math.pi / 3.0, 3 * math.pi * math.sqrt(3)
        for i in range(1, n):
            B = X[i]
            AB = B - A
            for j in range(i + 1, n):
                C = X[j]
                AC = C - A
                for k in range(j + 1, n):
                    D = X[k]
                    AD = D - A
                    nt += 1
                    [I, r] = sphereCirconscriteA4Points(A, B, C, D)
                    if I is not None:
                        Vs = cs * r**3
                        Vt = abs(det(AB, AC, AD)) / 6.0
                        moy += cv * Vt / Vs
        self._platitude = moy / nt
        return moy / nt
Example #28
def standRegres(xArr, yArr):
    '''
    Ordinary least squares (OLS, squared error): compute the regression
    coefficients of the best-fit line:
        W = (X^T * X)^(-1) * X^T * y
    :param xArr: input data X
    :param yArr: true values Y
    :return: regression coefficients
    '''
    xMat = mat(xArr)
    yMat = mat(yArr).T  # transpose
    xTx = xMat.T * xMat  # compute X^T * X

    # A square matrix is invertible iff its determinant is non-zero
    if linalg.det(xTx) == 0.0:
        print("This matrix is not invertible")
        return

    ws = xTx.I * (xMat.T * yMat)  # xTx.I is the matrix inverse
    # ws = linalg.solve(xTx, xMat.T * yMat)
    return ws
Example #29
def lwlr(testPoint, data_matrix, label_matrix, k=0.5):
    weights = np.mat(np.eye(len(data_matrix)))
    for i in range(len(data_matrix)):
        # distance between this sample and the query point
        distance_matrix = testPoint - data_matrix[i, :]
        weights[i, i] = np.exp(distance_matrix * distance_matrix.T /
                               (-2.0 * k**2))
    xTx = data_matrix.T * (weights * data_matrix)
    if linalg.det(xTx) == 0:
        print("The determinant is 0; the matrix is not invertible")
        return

    w = xTx.I * (data_matrix.T * (weights * label_matrix))
    return testPoint * w
Example #30
from numpy.core.fromnumeric import var
from numpy.linalg.linalg import det

from sage.all import *
from utilsymbolic import *

x,y,z = var('x','y','z')

X = symMatrix( 3, 3 , 'X')
Y = symMatrix( 3, 3 , 'Y')
Z = symMatrix( 3, 3 , 'Z')
W = symMatrix( 3, 3 , 'W')

E = x*X + y*Y + z*Z + 1*W
EE = E*E.T

eq1 = det(E)
eq2 = EE*E - 0.5*EE.trace()*E

eqs = (eq1,eq2[0,0],eq2[0,1],eq2[0,2],eq2[1,0],eq2[1,1],eq2[1,2],eq2[2,0],eq2[2,1],eq2[2,2])

keysA = ('x^3','y^3','x^2*y','x*y^2','x^2*z','x^2','y^2*z','y^2','x*y*z','x*y')
keysB = ('x*z^2','x*z','x','y*z^2','y*z','y','z^3','z^2','z','')

# print out machine code for the linear system
printData('A',eqs,keysA)
printData('B',eqs,keysB)

print('Done')
Example #31
def sbgn_solver(Data,
                Model,
                Jacobian,
                Prior,
                TOL=1.0e-6,
                MAXIT=10,
                ALPHA=0.2,
                BETA=0.5,
                QUIET=False):
    """ 
    sbgn_solver - Scalar Bayesian Gauss-Newton (sbgn) solver for a 
    parameter estimation problem to fit a possibly non-linear model to
    a series of scalar observations polluted by IID Gaussian noise.
    
    This function seeks a solution to the maximum posterior probability of the
    parameters x and inverse noise variance s, given the measured Data, 
    Prior information, and IID Gaussian measurement noise:
    
    maximize p( x, s | Data, Model, Prior ).
    
    A guarded Gauss-Newton method is used to find a local solution to this 
    problem by successively approximating:
    
    Model(x - xo) ~ D*(x-xo) + Model(xo),
    
    where D is the Jacobian of the model function, so the approximation
    represents the local linear behavior of the model.
    
    Inputs:
    -------
    
    Data: array-like vector of measured data
    
    Model: a function handle to the model function with the following
    prototype:
    
    g = Model(x),
    
    where x is a d-dimensional parameter vector, and g is equal to the model 
    evaluated at x (i.e. g = Model(x)).
    
    Jacobian: a function handle to evaluate the Jacobian of the model at x,
    with the following prototype:
    
    D = Jacobian(x),
    
    where D is the Jacobian matrix used as the local linear approximation 
    to the model function, which is defined as follows:
    
    D = [dg_1/dx_1 dg_1/dx_2 ... dg_1/dx_d;
    
         dg_2/dx_1 dg_2/dx_2 ... dg_2/dx_d;
         
         ...
         
         dg_n/dx_1 dg_n/dx_2 ... dg_n/dx_d];
    
    
    Prior: Dictionary containing prior statistical information regarding the
    parameters x and s, including the initial guess, with the fields
    
        x_mean: dx1 vector of mean values for the nominal parameters.
      
        iSigma_x: dxd prior inverse covariance matrix for the model parameters
    
        psig: prior inverse variance exponential distribution parameter, defined
        such that psig represents worst case anticipated model and/or sensor
        error variance (before any measurements are made). 
                
            Note: psig replaces lambda in previous versions.
    
        xo: initial guess for starting the Gauss-Newton algorithm
    
    
    Optional named parameters for algorithm options:
    
        TOL: exit tolerance
         
        ALPHA: backtracking line search parameter 
       
        BETA: backtracking line search parameter
        
        MAXIT: maximum number of iterations to run
       
        QUIET: if true, progress output text is suppressed.
       
    
    Outputs:
    --------
    
    Est: Dictionary containing the optimal estimate and accuracy information 
    in the following fields:
    
        x_est: dx1 vector containing the estimate for all parameters
            
        s_est: scalar estimate for the inverse variance
            
        iSigma_est: inverse marginal parameter estimation covariance
       
        iSigma_xs_est: inverse joint covariance of x_est and s_est
    
        lnZ: estimated log evidence of the observed data
              
        model: nx1 vector containing the model output at x_est
            
        fo: 1x1 scalar objective value at (x_est, s_est)
               
        status: boolean status indicating convergence
    
    Note: iSigma_est, iSigma_xs_est, and lnZ are based on a local quadratic
    approximation of the objective function at the optimal solution and 
    MAY have poor accuracy. This can/should be checked using MCMC methods,
    like the one provided by the bgn_ns_solver function (has yet to be
    implemented in python).
    
    Finally, note, this version was ported over from Matlab code and is 
    not yet well tested for use with models that output complex numbers.
    TODO: Devise a test case for this scenario
    
       
    """

    # convert input Data vector to a numpy array (in case it is not already)
    Data = np.array(Data)

    # convert to column vector (in-place), which is needed for the computation
    # (len replaces the deprecated np.alen)
    Data.resize((len(Data), 1))

    # calculate number of observations n, which includes both real and
    # imaginary parts (counted separately)
    if np.all(np.isreal(Data)):
        n = len(Data)
    else:
        n = 2. * len(Data)

    x_mean = Prior['x_mean']
    iSigma_x = Prior['iSigma_x']
    psig = Prior['psig']

    xo = Prior['xo'].copy()
    go = Model(xo)

    # Setup and run the Gauss-Newton solver

    # define the objective function
    sumsq = lambda x: x.conj().transpose().dot(x)

    # quadratic sum with symmetric matrix Q (for real x only)
    qsumsq = lambda x, Q: x.transpose().dot(Q.dot(x))

    # evaluate the objective function using current model evaluated at x (m)
    objfun  = lambda x,m,s: np.real( s*sumsq(m-Data) - n*np.log(s) + 2.*psig*s \
                          + qsumsq(x-x_mean, iSigma_x) )

    # analytical measurement precision update
    # using the current model evaluated at x (m).
    supdate = lambda m: n / (sumsq(m - Data) + 2.0 * psig)

    # note: the above lambda functions make use of the current model output at x,
    # and therefore do not require any model evaluations.

    # initialize convergence status
    status = True
    # note: this starts as true and is set to false if there is a problem.

    # print progress output headers
    if not QUIET:
        hbar = '-' * 70
        print('\nBayesian Gauss-Newton Solver 2.1')
        print(hbar)
        print('   Solving a %i-dimensional problem.\n' % len(x_mean))

        # print algorithm progress feedback headers
        headers = ('Norm(dx)', 'Objective', 'Step Size', 'Norm(gradient)')
        print('%11s%17s%14s%18s' % headers)
        print(hbar)

    # initialize the no improvement counter
    no_imp_cnt = 0

    # solve for an optimal change in x
    for k in range(MAXIT):

        # On entry, xo and go are initialized above,
        # On repeat, xo and go are updated below.

        # update the Jacobian matrix at the current xo
        D = Jacobian(xo)
        b = Data - go
        c = x_mean - xo

        # compute the noise update first
        so = supdate(go)
        S = (1 / so) * iSigma_x

        # compute the current value of the objective function
        objfun_o = objfun(xo, go, so)

        # solve for the optimal update
        dx = linalg.solve(
            np.real(sumsq(D)) + S,
            np.real(D.conj().transpose().dot(b)) + S.dot(c))

        # compute the objective function gradient
        g = -2.0 * so * np.real(
            D.conj().transpose().dot(b)) - 2. * iSigma_x.dot(c)
        # note the minus sign because of definition of b and c above

        # line-search guard to ensure descent
        t = 1.0
        while True:
            xt = xo + t * dx
            gt = Model(xt)
            objfun_t = objfun(xt, gt, so)

            if objfun_t > objfun_o + ALPHA * t * g.transpose().dot(dx):
                t = BETA * t
            else:
                break

        # if the objective is not improved after 3 tries, exit
        if objfun_t >= objfun_o and t < 1.0:
            no_imp_cnt += 1
            if not QUIET:
                print('No improvement made to objective. Strike {}.'.format(no_imp_cnt))
            if no_imp_cnt == 3:
                print('No improvement made to objective. Exiting.')
                status = False
                break
        else:
            # reset the counter
            no_imp_cnt = 0

        # update current guess and model output.
        xo = xt
        go = gt

        # print progress info
        if not QUIET:
            print('%11.3f%17.7f%14.2f%18.3f' % (linalg.norm(dx), objfun_t, t,
                                                linalg.norm(g)))

        # exit conditions
        if (linalg.norm(dx) <= TOL):
            if not QUIET:
                print("\nNorm(dx) less than TOL. Done.")
            break
        # check norm of gradient
        elif (linalg.norm(g) <= TOL):
            if not QUIET:
                print("\nGradient less than TOL. Done.")
            break
        # note: if the norm of the gradient is small, then the uncertainty
        # analysis computed below should be representative, and even though
        # the parameter update step may be non-zero, the estimate may have
        # been found accurately enough relative to the uncertainty.

    else:
        status = False
        print('\nBayesian Gauss-Newton did NOT converge after max iterations.\n')

    if not QUIET:
        print(hbar)

    # get the objective function value on exit
    fo = objfun(xo, go, so)

    # diagnostics
    if not QUIET:
        print('Objective on exit = %0.6f' % fo)

    # compute the final Jacobian at xo
    D = Jacobian(xo)

    # diagnostics: compute the gradient at the solution
    b = Data - go
    c = x_mean - xo
    g = -2.0 * so * np.real(
        D.conj().transpose().dot(b)) - 2.0 * iSigma_x.dot(c)

    if not QUIET:
        print('Norm of gradient on exit = %f\n' % linalg.norm(g))

    #
    # Compute the estimation accuracy (covariance)
    #

    # compute the parameter estimation error
    iSigma_est = so*np.real(sumsq(D)) + iSigma_x - \
                 (2.0*so**2/n)*np.real( sumsq(b.conj().transpose().dot(D)) )

    Dtb = np.real(D.conj().transpose().dot(b))
    iSigma_xs_est = np.vstack((np.hstack(
        (so * np.real(sumsq(D)) + iSigma_x, -Dtb)),
                               np.hstack((-Dtb.transpose(), n / (2 * so**2)))))

    #iSigma_est = so*real(D'*D) + iSigma_x - (2*so^2/n)*real( (D'*b)*(b'*D) );

    #iSigma_xs_est  = [so*real(D'*D) + iSigma_x, -real(D'*b); ...
    #                           -real(b'*D),  n/(2*so^2)];

    #
    # Compute the evidence of the observed data under the BGN Model
    #

    d = xo.shape[0]
    lnK = (n/2.)*np.log(so/(2.*np.pi)) - (so/2.)*sumsq(b) \
          - (d/2.)*np.log(2.*np.pi) \
          + (1./2.)*np.log(linalg.det(iSigma_x)) - (1./2.)*qsumsq(c,iSigma_x) \
          + np.log(psig) - psig*so

    lnZ = lnK + ((d+1.)/2.)*np.log(2.*np.pi) \
               - (1./2.)*np.log(linalg.det(iSigma_xs_est))

    #
    # Define outputs
    #
    Est = {}
    Est['x_est'] = xo
    Est['s_est'] = so[0, 0]
    Est['iSigma_est'] = iSigma_est
    Est['iSigma_xs_est'] = iSigma_xs_est
    Est['lnZ'] = lnZ[0, 0]
    Est['model'] = go
    Est['fo'] = fo[0, 0]
    Est['status'] = status

    # note: so, lnZ, and fo are by themselves 1x1 numpy arrays; they are converted
    # to scalars simply by accessing their first (and only) element.

    return Est
Example #32
def lsbgn_solver(Data,
                 D,
                 Prior,
                 pEst=None,
                 TOL=1.0e-6,
                 MAXIT=300,
                 MAXM=None,
                 QUIET=False):
    """ 
    lsbgn_solver - linear scalar Bayesian Gauss-Newton (lsbgn) solver for a 
    parameter estimation problem to fit a LINEAR model to a series of scalar 
    observations polluted by IID Gaussian noise.
    
    This function seeks a solution to the maximum posterior probability of the
    parameters x and inverse noise variance s, given the measured Data, 
    Prior information, and IID Gaussian measurement noise:
    
    maximize p( x, s | Data, Model, Prior ).
    
    For the linear function the model is:
    
    Model(x) = D*x (and the Jacobian of the model w.r.t. x is D),
    
    where x is a d-dimensional parameter vector.
    
    
    Inputs:
    -------
    
    Data: array-like vector of measured data
    
    D: matrix defining the linear model function, as noted above.
    
    Prior: Dictionary containing prior statistical information regarding the
    parameters x and s, including the initial guess, with the fields
    
        x_mean: px1 vector of mean values for the nominal parameters.
      
        iSigma_x: pxp prior inverse covariance matrix for the model parameters
    
        psig: prior inverse variance exponential distribution parameter, defined
        such that psig represents worst case anticipated model and/or sensor
        error variance (before any measurements are made). 
    
        xo: initial guess for starting the Gauss-Newton algorithm
    
    
    Optional named parameters for algorithm options:
    
        pEst: prior estimation structure used to accumulate data 
    
        TOL: exit tolerance
        
        MAXIT: maximum number of iterations to run
       
        QUIET: if true, progress output text is suppressed.
       
    
    Outputs:
    --------
    
    Est: Dictionary containing the optimal estimate and accuracy information 
    in the following fields:
    
        x_est: dx1 vector containing the estimate for all parameters
            
        s_est: scalar estimate for the inverse variance
            
        iSigma_est: inverse marginal parameter estimation covariance
       
        iSigma_xs_est: inverse joint covariance of x_est and s_est
    
        lnZ: estimated log evidence of the observed data
              
        model: nx1 vector containing the model output at x_est
            
        fo: 1x1 scalar objective value at (x_est, s_est)
               
        status: boolean status indicating convergence
        
        Data: Data structure from previous run, used internally to update 
        with new data.
    
    Finally, note, this version was ported over from Matlab code and is 
    not yet well tested for use with models that output complex numbers.
    TODO: Devise a test case for this scenario
    
       
    """

    # convert input Data vector to a numpy array (in case it is not already)
    Data = np.array(Data)

    # convert to column vector (in-place), which is needed for the computation
    Data.resize((len(Data), 1))

    # prefix previous data if available
    if pEst:
        Data = np.vstack((pEst['Data'][0], Data))
        D = np.vstack((pEst['Data'][1], D))

    # truncate if number of elements exceeds max permitted (memory limit)
    if MAXM and (len(Data) > MAXM):
        Data = Data[-MAXM:, :]
        D = D[-MAXM:, :]

    # calculate number of observations n, which includes both real and
    # imaginary parts (counted separately)
    if np.all(np.isreal(Data)):
        n = len(Data)
    else:
        n = 2. * len(Data)

    x_mean = Prior['x_mean']
    iSigma_x = Prior['iSigma_x']
    psig = Prior['psig']

    # set initial parameter estimate if a previous estimate is available
    if pEst:
        xo = pEst['x_est']
    elif 'xo' in Prior.keys():
        xo = Prior['xo']
    else:
        xo = x_mean

    # initialize the model output
    go = D.dot(xo)

    #
    # Setup and run the Gauss-Newton solver
    #

    # define the objective function
    sumsq = lambda x: x.conj().transpose().dot(x)

    # quadratic sum with symmetric matrix Q (for real x only)
    qsumsq = lambda x, Q: x.transpose().dot(Q.dot(x))


    objfun  = lambda x,m,s: np.real( s*sumsq(m-Data) - n*np.log(s) + 2.*psig*s \
                            + qsumsq(x-x_mean, iSigma_x) )

    supdate = lambda m: n / (sumsq(m - Data) + 2.0 * psig)

    # note: the above lambda functions make use of the current model output at x,
    # and therefore do not require any model evaluations.

    # initialize convergence status
    status = True
    # note: this starts as true and is set to false if there is a problem.

    # print progress output headers
    if not QUIET:
        hbar = '-' * 70
        print('\nLinear Bayesian Gauss-Newton Solver 2.0')
        print(hbar)
        print('   Solving a %i-dimensional problem.\n' % len(x_mean))

        # print algorithm progress feedback headers
        headers = ('Norm(dx)', 'Objective', 'Step Size', 'Norm(gradient)')
        print('%11s%17s%14s%18s' % headers)
        print(hbar)

    # solve for an optimal change in x
    for k in range(MAXIT):

        # On entry, xo and go are initialized above,
        # On repeat, xo and go are updated below.

        #go = D.dot(xo); #, D = Model(xo);
        b = Data - go
        c = x_mean - xo

        # compute the noise update first
        so = supdate(go)
        S = (1 / so) * iSigma_x

        # compute the current value of the objective function
        # (without reevaluating the Model function)
        #objfun_o = so*sumsq(b) - n*np.log(so) + 2.0*psig*so \
        #             + qsumsq(c,iSigma_x); #c'*(iSigma_x*c);

        # solve for the optimal update
        dx = linalg.solve(
            np.real(sumsq(D)) + S,
            np.real(D.conj().transpose().dot(b)) + S.dot(c))

        # compute the objective function gradient
        g = -2.0 * so * np.real(
            D.conj().transpose().dot(b)) - 2. * iSigma_x.dot(c)
        # note the minus sign because of definition of b and c above

        # line-search guard to ensure descent
        t = 1.0
        #while objfun(xo + t*dx,so) > objfun_o + ALPHA*t*g.transpose().dot(dx):
        #    t = BETA*t;

        # if the objective is not improved then break
        xt = xo + t * dx
        gt = D.dot(xt)
        objfun_t = objfun(xt, gt, so)

        #if objfun_t > objfun_o:
        #    if not QUIET:
        #        print 'No improvement made to objective. Exiting.\n';
        #    break;

        # update current guess
        xo = xt
        go = gt

        # print progress info
        if not QUIET:
            print('%11.3f%17.7f%14.2f%18.3f' % (linalg.norm(dx), objfun_t, t,
                                                linalg.norm(g)))

        if linalg.norm(dx) <= TOL:
            break

    else:
        status = False
        print('\nBayesian Gauss-Newton did NOT converge after max iterations.\n')

    if not QUIET:
        print(hbar)

    # get the objective function value on exit
    fo = objfun(xo, go, so)

    # diagnostics
    if not QUIET:
        print('Objective on exit = %0.6f' % fo)

    # diagnostics: compute the gradient at the solution
    b = Data - go
    c = x_mean - xo
    g = -2.0 * so * np.real(
        D.conj().transpose().dot(b)) - 2.0 * iSigma_x.dot(c)

    if not QUIET:
        print('Norm of gradient on exit = %f\n' % linalg.norm(g))

    #
    # Compute the estimation accuracy (covariance)
    #

    # compute the parameter estimation error
    iSigma_est = so*np.real(sumsq(D)) + iSigma_x - \
                 (2.0*so**2/n)*np.real( sumsq(b.conj().transpose().dot(D)) )

    Dtb = np.real(D.conj().transpose().dot(b))
    iSigma_xs_est = np.vstack((np.hstack(
        (so * np.real(sumsq(D)) + iSigma_x, -Dtb)),
                               np.hstack((-Dtb.transpose(), n / (2 * so**2)))))

    #
    # Compute the evidence of the observed data under the BGN Model
    #

    d = xo.shape[0]
    lnK = (n/2.)*np.log(so/(2.*np.pi)) - (so/2.)*sumsq(b) \
          - (d/2.)*np.log(2.*np.pi) \
          + (1./2.)*np.log(linalg.det(iSigma_x)) - (1./2.)*qsumsq(c,iSigma_x) \
          + np.log(psig) - psig*so

    lnZ = lnK + ((d+1.)/2.)*np.log(2.*np.pi) \
               - (1./2.)*np.log(linalg.det(iSigma_xs_est))

    #
    # Define outputs
    #
    Est = {}
    Est['x_est'] = xo
    Est['s_est'] = so[0, 0]
    Est['iSigma_est'] = iSigma_est
    Est['iSigma_xs_est'] = iSigma_xs_est
    Est['lnZ'] = lnZ[0, 0]
    Est['model'] = go
    Est['fo'] = fo[0, 0]
    Est['status'] = status
    Est['Data'] = [Data, D]

    # note: so, lnZ, and fo are by themselves 1x1 numpy arrays; they are converted
    # to scalars simply by accessing their first (and only) element.

    return Est
Example #33
 def __calc_log_prior(s_mat, r, v):
     d = s_mat.shape[0]
     log_prior = LOG2 * (v * d / 2.0) + (d / 2.0) * np.log(2.0 * np.pi / r)
     log_prior += multigammaln(v / 2.0, d) - \
         (v / 2.0) * np.log(linalg.det(s_mat))
     return log_prior
Example #34
def volumeTetraedre1(A, B, C, D):
    """Signed volume of tetrahedron ABCD: det(AB, AC, AD) / 6."""
    return det(B - A, C - A, D - A) / 6.0  # the /6 was missing; the docstring specifies det/6
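A worked check (my addition): the unit tetrahedron has volume 1/6. This assumes the project's det(u, v, w) helper, which appears to take three vectors and return the determinant of the 3x3 matrix they form (numpy.linalg.det takes a single matrix, so this must be a local helper), plus numpy as np.

# Hypothetical check of volumeTetraedre1
A = np.array([0.0, 0.0, 0.0])
B = np.array([1.0, 0.0, 0.0])
C = np.array([0.0, 1.0, 0.0])
D = np.array([0.0, 0.0, 1.0])
print(volumeTetraedre1(A, B, C, D))  # ~0.1667 = 1/6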
Example #35
        if y != 0:
            j = x + (y - 1) * m
            matrix[i][j] = -1
            inc += 1
        if y != n - 1:
            j = x + (y + 1) * m
            matrix[i][j] = -1
            inc += 1
        matrix[i][i] = inc        
    full = array(matrix)
    filter = [True for _ in range(n * m - 1)]
    return full.compress(filter, axis=0).compress(filter, axis=1)


#mp.dps = 6
def hl(n, m):
    lp = mpf(2 ** (n * m - 1)) / mpf(n) / mpf(m)
    rp = mpf(1)
    for i in range(n):
        for j in range(m):
            if not (i == 0 and j == 0):
                rp = rp * mpf(2 - math.cos(i * math.pi / n) - math.cos(j * math.pi / m))
    return lp * rp

arr = create_matrix(9, 12)
print(det(arr))
print(hl(100, 500))
#6.32023650698369e+25093


Example #36
 def setParams(self,mu,cov):
     self.mu = mu
     self.cov = cov
     self._invCov = inv(self.cov)
     self._detCov = det(self.cov)
     self._multConst = 1 / sqrt((2 * pi) ** 3 * self._detCov)
Example #37
            b[i] += koeff * b[diag_ind]
    transp_X = np.zeros(size)  # At this point the lower-left part of the matrix is already filled with zeros...
    j = size - 1
    for i in range(size - 1, -1, -1):
        for l in range(j + 1, size):
            b[i] -= A[i][l] * transp_X[l]
        transp_X[i] = b[i] / A[i][j]
        j -= 1
    X = np.zeros(size)  # The vector X is found, but its elements are still permuted; reorder them by our permutation
    for i in range(size):
        X[i] = transp_X[ColsTranspVector[i]]
    return X


X1 = GaussSelectingMainElementThroughoutWholeMatrix(A1, SIZE, b1)
print("Определитель первой матрицы: det(A1) =", linalg.det(A1))
print("Решение первой системы c помощью NymPy: X1 =", X1)
print("Точное решение первой системы: X1 =", X1_answer, '\n')

X2 = GaussSelectingMainElementThroughoutWholeMatrix(A2, SIZE, b2)
print("Определитель второй матрицы: det(A2) =", linalg.det(A2))
print("Решение второй системы c помощью NymPy: X2 =", X2)
print("Точное решение второй системы: X2 =", X2_answer, '\n')

X3 = GaussSelectingMainElementThroughoutWholeMatrix(A3, SIZE, b3)
print("Определитель третьей матрицы: det(A3) =", linalg.det(A3))
print("Решение третьей системы c помощью NymPy: X3 =", X3)
print("Точное решение третьей системы: X3 =", X3_answer, '\n')

print("Число обусловленности максимум-нормы для первой матрицы: X(A1) =", ConditionNumber(A1, SIZE))
print("Число обусловленности максимум-нормы для второй матрицы: X(A2) =", ConditionNumber(A2, SIZE))