Example #1
    def test_tangent_space(self):
        A = self.rand_cases[0]
        pr, vL = A.left_null_projector(True)
        V = vL.reshape(-1, vL.shape[-1])
        self.assertTrue(allclose(eye_like(V.T), V.conj().T @ V))
        #def H(λ): return [-4*Sz12@Sz22+2*λ*(Sx12+Sx22)]
        λ = 1

        def H(λ):
            return [Sz12 @ Sz22 + λ * (Sx12 + Sx22)]

        O, l, r, K = A.Lh(H(λ), testing=True)
        fun = norm(O(concatenate([real(K.reshape(-1)), imag(K.reshape(-1))])))
        self.assertTrue(abs(fun) < 1e-5)
        self.assertTrue(allclose(tr(K @ r), 0))

        O, l, r, K = A.Rh(H(λ), testing=True)
        fun = norm(O(concatenate([real(K.reshape(-1)), imag(K.reshape(-1))])))
        self.assertTrue(abs(fun) < 1e-5)
        self.assertTrue(allclose(tr(l @ K), 0))
Example #2
    def E(self, op, c=None):
        """E: calculate expectation of single site operator

        :param op: operator to compute expectation of
        :param c: canonicalisation of current state.
                  Should be in ['l', 'm', 'r', None].
                  If None, decompose to vidal then use that.
        """
        if c == 'm':
            L = self.L
            A = self.data[0]
            return real_if_close(sum(L @ A * tensordot(op, C(A) @ L, [1, 0])))
        if c == 'r':
            L = self.L
            A = self.data[0]
            return real_if_close(sum(A * tensordot(op, L**2 @ C(A), [1, 0])))
        if c == 'l':
            L = self.L
            A = self.data[0]
            return real_if_close(sum(A @ L**2 * tensordot(op, C(A), [1, 0])))

        if c is None:
            G, L = self.canonicalise(to_vidal=True).data[0]
            circle = tr(G.dot(L).dot(L).dot(H(G)).dot(L).dot(L), axis1=1, axis2=3)
            #  - L - G - L -
            # |      |0     |       |0
            # |    circle   |,      op
            # |      |1     |       |1
            #  - L - G - L -
            return real_if_close(tr(circle @ op))
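In the c is None branch, circle[s, t] is the one-site transfer contraction Tr(G[s] L L G[t]† L L). As a cross-check, the same (d, d) matrix can be built with a plain einsum; a sketch assuming G has shape (d, D, D), L is the (D, D) matrix of Schmidt values, and H(·) takes the conjugate transpose of each physical slice:

import numpy as np

def circle_einsum(G, L):
    # circle[s, t] = Tr(G[s] @ L @ L @ G[t].conj().T @ L @ L)
    return np.einsum('sab,bc,cd,ted,ef,fa->st', G, L, L, G.conj(), L, L)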
Example #3
 def left_fixed_point(self, l0=None, tol=0):
     d, D = self.d, self.D
     if l0 is not None:
         l0 = l0.reshape(D**2)
     η, l = arnoldi(self.aslinearoperator().H, k=1, v0=l0, tol=tol)
     l = rotate_to_hermitian(l)/sign(l[0])
     l = l.reshape(D, D)
     n = np.sqrt(tr(l.conj().T@l))
     return η*n, l/n
Example #4
File: ppca.py Project: yochju/ppca
 def transform(self, y=None):
     if y is None:
         y = self.y
     [w, mu, sigma] = [self.w, self.mu, self.sigma]
     m = tr(w).dot(w) + sigma * np.eye(w.shape[1])
     m = inv(m)
     x = m.dot(tr(w)).dot(y - mu)
     return x
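transform above is the standard PPCA posterior-mean projection x = inv(W^T W + sigma*I) W^T (y - mu); here tr is numpy.transpose. A self-contained sketch with made-up shapes:

import numpy as np
from numpy import transpose as tr
from numpy.linalg import inv

rng = np.random.default_rng(0)
w = rng.normal(size=(10, 2))      # loadings: p=10 features, q=2 latents
mu = rng.normal(size=(10, 1))     # data mean
sigma = 0.1                       # isotropic noise term, as in the example
y = rng.normal(size=(10, 5))      # five observations as columns

m = inv(tr(w).dot(w) + sigma * np.eye(w.shape[1]))
x = m.dot(tr(w)).dot(y - mu)      # latent posterior means, shape (2, 5)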
Example #5
 def transform(self, y=None):
     if y is None:
         return self.q_dist.x_mean
     q = self.q_dist
     [w, mu, sigma] = [q.w, q.mu, q.gamma**-1]
     m = tr(w).dot(w) + sigma * np.eye(w.shape[1])
     m = inv(m)
     x = m.dot(tr(w)).dot(y - mu)
     return x
Example #7
 def transform(self, y=None):
     if y is None:
         return self.q_dist.z_mean
     q = self.q_dist
     tau_mean = q.tau_a / q.tau_b
     term1  = tau_mean * np.asarray([(q.z_cov[i].dot(tr(q.w_mean))).dot(y[:,i] - q.mu_mean) for i in range(y.shape[1])])
     term2  = np.asarray([q.z_cov[i].dot(tr(self.E_s[i].dot(self.E_m_E_T))) for i in range(y.shape[1])])
     z_mean = tr(term1 + term2)
     return z_mean
Example #8
 def update_w(self):
     q = self.q_dist
     z_cov = np.zeros((self.q, self.q))
     for n in range(self.n):
         x = q.z_mean[:, n]
         z_cov += x[:, np.newaxis].dot(np.array([x]))
     q.w_cov = np.diag(q.alpha_a / q.alpha_b) + q.tau_mean() * z_cov
     q.w_cov = inv(q.w_cov)
     yc = self.y - q.mu_mean[:, np.newaxis]
     q.w_mean = q.tau_mean() * q.w_cov.dot(q.z_mean.dot(tr(yc)))
     q.w_mean = tr(q.w_mean)
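The z_cov accumulation above is a sum of outer products, i.e. the Gram matrix of q.z_mean; a quick equivalence check (sketch with made-up shapes):

import numpy as np

rng = np.random.default_rng(0)
z_mean = rng.normal(size=(3, 100))   # (q, n) latent means

z_cov = np.zeros((3, 3))
for n in range(z_mean.shape[1]):
    x = z_mean[:, n]
    z_cov += x[:, np.newaxis].dot(np.array([x]))

assert np.allclose(z_cov, z_mean @ z_mean.T)   # one matmul does the same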
Example #9
 def update_w(self):
     q = self.q_dist
     # cov
     x_cov = np.zeros((self.q, self.q))
     for n in range(self.n):
         x = q.x_mean[:, n]
         x_cov += x[:, np.newaxis].dot(np.array([x]))
     q.w_cov = np.diag(q.alpha_a / q.alpha_b) + q.gamma_mean() * x_cov
     q.w_cov = inv(q.w_cov)
     # mean
     yc = self.y - q.mu_mean[:, np.newaxis]
     q.w_mean = q.gamma_mean() * q.w_cov.dot(q.x_mean.dot(tr(yc)))
     q.w_mean = tr(q.w_mean)
Example #10
def test_pure_QR(m):
    random.seed(1302 * m)
    A = random.randn(m, m) + 1j * random.randn(m, m)
    A = 0.5 * (A + np.conj(A).T)  # make A Hermitian, as the first check expects
    A0 = 1.0 * A
    A2 = cla_utils.pure_QR(A0, maxit=10, tol=1.0e-100)
    #check it is still Hermitian
    assert (np.linalg.norm(A2 - np.conj(A2).T) < 1.0e-4)
    #check for orthogonality
    x0 = random.randn(m)
    assert (np.abs(np.linalg.norm(np.dot(A2, x0)) -
                   np.linalg.norm(np.dot(A0, x0))) < 1.0e-6)
    #check for conservation of trace
    assert (np.abs(np.trace(A0) - np.trace(A2)) < 1.0e-6)
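cla_utils.pure_QR itself is not among these results; a minimal sketch of an unshifted QR iteration with the interface the test assumes (the maxit and tol semantics are assumptions):

import numpy as np

def pure_QR(A, maxit, tol):
    # unshifted QR algorithm: A_{k+1} = R_k Q_k = Q_k^* A_k Q_k is a unitary
    # similarity transform, so eigenvalues, trace and Hermiticity are preserved
    Ak = A.copy()
    for _ in range(maxit):
        Q, R = np.linalg.qr(Ak)
        Ak = R @ Q
        if np.linalg.norm(np.tril(Ak, -1)) < tol:   # (near-)triangular: done
            break
    return Ak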
Example #12
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lam):
	'''
	Given NN parameters, layer sizes, number of labels, data, and the regularisation
	parameter lam, returns the cost of a forward pass through the NN.
	'''

	Theta1 = (reshape(nn_params[:(hidden_layer_size*(input_layer_size+1))],(hidden_layer_size,(input_layer_size+1))))
	
	Theta2 = (reshape(nn_params[((hidden_layer_size*(input_layer_size+1))):],(num_labels, (hidden_layer_size+1))))

	m = X.shape[0]
	n = X.shape[1]
	
	#forward pass
	y_eye = eye(num_labels)
	y_new = np.zeros((y.shape[0],num_labels))

	for z in range(y.shape[0]):
		y_new[z,:] = y_eye[int(y[z])-1]
	
	y = y_new

	a_1 = c_[ones((m,1)),X]
	z_2 = tr(Theta1.dot(tr(a_1)))

	a_2 = sigmoid(z_2)
	a_2 = c_[ones((a_2.shape[0],1)), a_2]

	a_3 = tr(sigmoid(Theta2.dot(tr(a_2))))

	J_reg = lam/(2.*m) * (sum(sum(Theta1[:,1:]**2)) + sum(sum(Theta2[:,1:]**2)))

	J = (1./m) * sum(sum(-y*log(a_3) - (1-y)*log(1-a_3))) + J_reg

	#Backprop

	d_3 = a_3 - y
	
	d_2 = d_3.dot(Theta2[:,1:])*sigmoidGradient(z_2)

	Theta1_grad = 1./m * tr(d_2).dot(a_1)
	Theta2_grad = 1./m * tr(d_3).dot(a_2)

	#Add regularization

	Theta1_grad[:,1:] = Theta1_grad[:,1:] + lam*1.0/m*Theta1[:,1:]
	Theta2_grad[:,1:] = Theta2_grad[:,1:] + lam*1.0/m*Theta2[:,1:]

	#Unroll gradients
	grad = tr(c_[Theta1_grad.swapaxes(1,0).reshape(1,-1), Theta2_grad.swapaxes(1,0).reshape(1,-1)])
	
	#return statement for function testing:
	#return Theta1,Theta2, y, a_1, z_2, d_3, a_3, J, grad, Theta1_grad, Theta2_grad
	
	#optimize.fmin expects a single value, so cannot return grad
	return J
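nnCostFunction assumes sigmoid and sigmoidGradient helpers that are not shown; minimal versions consistent with how they are used above:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoidGradient(z):
    s = sigmoid(z)
    return s * (1.0 - s)   # derivative of the logistic sigmoid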
Example #13
def f(W, *args):
    N, Y, sigma = args
    _W_ = W.reshape(10, 2)

    # PPCA model covariance: C = W W^T + sigma^2 I
    T0 = np.dot(_W_, np.transpose(_W_)) + sigma**2 * np.eye(_W_.shape[0])
    T1 = np.dot(np.dot(Y, pinv(T0)), np.transpose(Y))
    # negative log-likelihood up to constants: (N/2) log|C| + tr(Y C^-1 Y^T)/2
    return N * np.log(det(T0)) / 2 + tr(T1) / 2
Example #14
def getOneEllipse(A):
    evals = eig(A)[0]
    f1 = evals[0]
    f2 = evals[1]
    center = 1 / 2 * tr(A)
    x0 = center.real
    y0 = center.imag

    majAxVec_x = f2.real - f1.real
    majAxVec_y = f2.imag - f1.imag
    theta = cmath.phase(complex(majAxVec_x,
                                majAxVec_y)) / (2 * np.pi) * 360  # in DEGREES

    b = np.sqrt(
        tr(np.matmul(A, np.transpose(np.conjugate(A)))) - abs(f1)**2 -
        abs(f2)**2).real
    a = np.sqrt(b**2 + abs(f1 - f2)**2).real
    elParam = [(x0, y0), a, b, theta]
    return elParam
Example #15
    def update_z(self, s, T, m):
        E_s = s
        self.E_s = E_s
        scale, dof = T
        scale_inv = np.asarray([pinvh(sc) for sc in scale])
        E_m = m
        E_T = np.asarray([df * sc for sc,df in zip(scale_inv,dof)])

        E_s_E_T = np.tensordot(E_s, E_T, axes=([1],[0]))
        E_m_E_T = np.asarray([tr(em).dot(et) for em,et in zip(E_m,E_T)])
        self.E_m_E_T = E_m_E_T

        q = self.q_dist
        tau_mean = q.tau_a / q.tau_b
        q.z_cov  = inv(tau_mean * tr(q.w_mean).dot(q.w_mean) + E_s_E_T)
        tr_w_mean = tr(q.w_mean)

        term1 = tau_mean * np.asarray([(q.z_cov[i].dot(tr_w_mean)).dot(self.y[:,i] - q.mu_mean) for i in range(self.n)])
        term2 = np.asarray([q.z_cov[i].dot(tr(E_s[i].dot(E_m_E_T))) for i in range(self.n)])
        q.z_mean = tr(term1 + term2)
Example #16
 def E(self, op):
     """E: TOTEST
     """
     G, L = self.data[0]
     circle = tr(G.dot(L).dot(L).dot(H(G)).dot(L).dot(L), axis1=1, axis2=3)
     #  - L - G - L -
     # |      |0     |       |0
     # |    circle   |,      op
     # |      |1     |       |1
     #  - L - G - L -
     return real_if_close(tensordot(circle, op, [[0, 1], [0, 1]]))
Example #17
File: ppca.py Project: yochju/ppca
    def __fit_em(self, maxit=20):
        w = np.random.rand(self.p, self.q)
        mu = np.mean(self.y, 1)[:, np.newaxis]
        sigma = self.prior_sigma
        ll = self.__ell(w, mu, sigma)

        yy = self.y - mu
        s = self.n**-1 * yy.dot(tr(yy))
        for i in range(maxit):
            m = inv(tr(w).dot(w) + sigma * np.eye(self.q))
            t = inv(sigma * np.eye(self.q) + m.dot(tr(w)).dot(s).dot(w))
            w_new = s.dot(w).dot(t)
            sigma_new = self.p**-1 * np.trace(s -
                                              s.dot(w).dot(m).dot(tr(w_new)))
            ll_new = self.__ell(w_new, mu, sigma_new)
            print "{:3d}  {:.3f}".format(i + 1, ll_new)
            w = w_new
            sigma = sigma_new
            ll = ll_new
        return (w, mu, sigma)
Example #18
    def energy(self, H):
        """energy of sum of two site terms
        """
        d, D = self.d, self.D
        h = H[0].reshape(d, d, d, d)
        A = self.data[0]
        _, l, r = self.eigs()

        C = ncon([h]+[A, A], [[-1, -2, 1, 2], [1, -3, 3], [2, 3, -4]]) # HAA

        K = ncon([l@A.conj(), A.conj()]+[C], [[1, 3, 4], [2, 4, -2], [1, 2, 3, -1]]) #AAHAA
        self.e = tr(K@r)
        return real(self.e)
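ncon (from the ncon package) contracts a tensor network given one index list per tensor, with positive labels contracted and negative labels left open. For the first contraction above, an equivalent plain einsum (sketch, with h of shape (d, d, d, d) and A of shape (d, D, D)) is:

import numpy as np

d, D = 2, 3
h = np.random.randn(d, d, d, d)
A = np.random.randn(d, D, D)

# C[s, t, a, b]: the two-site gate h applied to two MPS tensors A
C = np.einsum('stij,iak,jkb->stab', h, A, A)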
Example #19
def quantiles(x, qlist=[2.5, 25, 50, 75, 97.5]):
    """Returns a dictionary of requested quantiles from array"""

    # Make a copy of trace
    x = x.copy()

    # For multivariate node
    if x.ndim>1:
        # Transpose first, then sort, then transpose back
        sx = tr(sort(tr(x)))
    else:
        # Sort univariate node
        sx = sort(x)

    try:
        # Generate specified quantiles
        quants = [sx[int(len(sx)*q/100.0)] for q in qlist]

        return dict(zip(qlist, quants))

    except IndexError:
        print "Too few elements for quantile calculation"
Example #20
 def update_gamma(self):
     q = self.q_dist
     q.gamma_a = self.hyper.gamma_a + 0.5 * self.n * self.p
     q.gamma_b = self.hyper.gamma_b
     w = q.w_mean
     ww = tr(w).dot(w)
     for n in range(self.n):
         y = self.y[:, n]
         x = q.x_mean[:, n]
         q.gamma_b += y.dot(y) + q.mu_mean.dot(q.mu_mean)
         q.gamma_b += np.trace(ww.dot(x[:, np.newaxis].dot([x])))
         q.gamma_b += 2.0 * q.mu_mean.dot(w).dot(x[:, np.newaxis])
         q.gamma_b -= 2.0 * y.dot(w).dot(x)
         q.gamma_b -= 2.0 * y.dot(q.mu_mean)
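The five accumulated terms in the loop are the expansion of a squared residual, sum over n of ||y_n - W x_n - mu||^2; a quick numerical check of that identity (sketch with made-up shapes, using point values for x where the real update uses posterior moments):

import numpy as np

rng = np.random.default_rng(0)
p, q, n = 4, 2, 6
w = rng.normal(size=(p, q))
mu = rng.normal(size=p)
Y = rng.normal(size=(p, n))
X = rng.normal(size=(q, n))

acc = 0.0
for i in range(n):
    y, x = Y[:, i], X[:, i]
    acc += y.dot(y) + mu.dot(mu)
    acc += np.trace((w.T @ w).dot(np.outer(x, x)))
    acc += 2.0 * mu.dot(w).dot(x)
    acc -= 2.0 * y.dot(w).dot(x)
    acc -= 2.0 * y.dot(mu)

assert np.isclose(acc, np.sum((Y - w @ X - mu[:, None])**2))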
Example #21
 def update_tau(self):
     q = self.q_dist
     q.tau_a = self.hyper.tau_a + 0.5 * self.n * self.p
     q.tau_b = self.hyper.tau_b
     w = q.w_mean
     ww = tr(w).dot(w)
     for n in range(self.n):
         y = self.y[:, n]
         x = q.z_mean[:, n]
         q.tau_b += (y.dot(y) + q.mu_mean.dot(q.mu_mean)) / 2
         q.tau_b += (np.trace(ww.dot(x[:, np.newaxis].dot([x])))) / 2
         q.tau_b += (2.0 * q.mu_mean.dot(w).dot(x[:, np.newaxis])) / 2
         q.tau_b -= (2.0 * y.dot(w).dot(x)) / 2
         q.tau_b -= (2.0 * y.dot(q.mu_mean)) / 2
Example #22
 def update_gamma(self):
     q = self.q_dist
     q.gamma_a = self.hyper.gamma_a + 0.5 * self.n * self.p
     q.gamma_b = self.hyper.gamma_b
     w = q.w_mean
     ww = tr(w).dot(w)
     for n in range(self.n):
         y = self.y[:, n]
         x = q.x_mean[:, n]
         q.gamma_b += y.dot(y) + q.mu_mean.dot(q.mu_mean)
         q.gamma_b += np.trace(ww.dot(x[:, np.newaxis].dot([x])))
         q.gamma_b += 2.0 * q.mu_mean.dot(w).dot(x[:, np.newaxis])
         q.gamma_b -= 2.0 * y.dot(w).dot(x)
         q.gamma_b -= 2.0 * y.dot(q.mu_mean)
Example #23
    def __ollh(self, A, mu, sigma2):
        """Observed data log likelihood"""

        ll = 0.0

        for i in np.arange(self.n):

            s = self.S[:, i]
            nobs = int(sum(s))
            m = mu[s == 1]
            y = self.Y[s == 1, i] - m
            a = A[s == 1, :]

            if nobs > 0:

                C = np.dot(a, tr(a)) + sigma2 * np.eye(nobs)
                logDetC = np.log(linalg.det(C))
                #logDetC = sum(np.log(linalg.eigvals(C)))
                ll = ll - 0.5 * nobs * np.log(
                    2 * np.pi) - 0.5 * logDetC - 0.5 * tr(y).dot(
                        pinv(C)).dot(y)

        return ll
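Each per-sample term above is the log-density of a zero-mean Gaussian with covariance C = a a^T + sigma2*I evaluated at the observed entries; a cross-check against scipy (sketch; tr is numpy.transpose, a no-op on the 1-D y, matching the example):

import numpy as np
from numpy import transpose as tr
from numpy.linalg import pinv
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
nobs = 3
a = rng.normal(size=(nobs, 2))
sigma2 = 0.5
y = rng.normal(size=nobs)

C = np.dot(a, tr(a)) + sigma2 * np.eye(nobs)
ll = (-0.5 * nobs * np.log(2 * np.pi) - 0.5 * np.log(np.linalg.det(C))
      - 0.5 * tr(y).dot(pinv(C)).dot(y))
assert np.isclose(ll, multivariate_normal(mean=np.zeros(nobs), cov=C).logpdf(y))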
Example #24
    def eigs(self, l0=None, r0=None):
        A = self.A
        if l0 is not None:
            l0 = l0.reshape(self.shape[1])
        if r0 is not None:
            r0 = r0.reshape(self.shape[1])
        _,   r = arnoldi(self.aslinearoperator(), k=1, v0=r0)
        eta, l = arnoldi(self.aslinearoperator().H, k=1, v0=l0)

        r, l = (rotate_to_hermitian(r.reshape(A.shape[1:]))/sign(r[0]),
                rotate_to_hermitian(l.reshape(A.shape[1:]))/sign(l[0]))

        n = tr(l @ r)
        q = 1  # tr(l@l)

        return real_if_close(eta), l*sqrt(q)/sqrt(n), r/sqrt(q*n)
Example #25
def hpd(x, alpha):
    """Calculate highest posterior density (HPD) of array for given alpha. The HPD is the
    minimum width Bayesian credible interval (BCI).

    :Arguments:
      x : Numpy array
          An array containing MCMC samples
      alpha : float
          Desired probability of type I error

    """

    # Make a copy of trace
    x = x.copy()

    # For multivariate node
    if x.ndim > 1:

        # Transpose first, then sort
        tx = tr(x, list(range(x.ndim)[1:]) + [0])
        dims = shape(tx)

        # Container list for intervals
        intervals = np.resize(0.0, (2, ) + dims[:-1])

        for index in make_indices(dims[:-1]):

            try:
                index = tuple(index)
            except TypeError:
                pass

            # Sort trace
            sx = sort(tx[index])

            # Append to list
            intervals[0][index], intervals[1][index] = calc_min_interval(
                sx, alpha)

        # Transpose back before returning
        return array(intervals)

    else:
        # Sort univariate node
        sx = sort(x)

        return array(calc_min_interval(sx, alpha))
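calc_min_interval is not shown in these results; a minimal sketch of the usual implementation (an assumption, not necessarily this project's version): scan every interval containing a fraction 1 - alpha of the sorted samples and keep the narrowest.

import numpy as np

def calc_min_interval(sx, alpha):
    # sx: sorted 1-D samples
    n = len(sx)
    n_included = int(np.floor((1.0 - alpha) * n))
    widths = sx[n_included:] - sx[:n - n_included]
    best = np.argmin(widths)
    return sx[best], sx[best + n_included]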
Example #27
def compute_fid(model, data_set1, data_set2):
    activation1 = model.predict(data_set1)
    activation2 = model.predict(data_set2)

    (mean1, mean2) = activation1.mean(axis=0), activation2.mean(axis=0)
    (sigma1, sigma2) = cov(activation1, rowvar=False), cov(activation2,
                                                           rowvar=False)

    squared_diff = np.sum((mean1 - mean2)**2.0)
    covariance_mean = sqrtm(sigma1.dot(sigma2))

    if iscomplexobj(covariance_mean):
        covariance_mean = covariance_mean.real

    fid = squared_diff + tr(sigma1 + sigma2 - 2.0 * covariance_mean)
    fid = round(fid, 2)
    return fid
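The quantity computed is the Frechet distance between Gaussians fitted to the two activation sets, FID = ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 sqrtm(S1 S2)). A self-contained run on synthetic stand-ins for the model activations (purely illustrative):

import numpy as np
from numpy import cov, iscomplexobj, trace as tr
from scipy.linalg import sqrtm

rng = np.random.default_rng(0)
act1 = rng.normal(size=(256, 64))
act2 = rng.normal(size=(256, 64)) + 0.1

mean1, mean2 = act1.mean(axis=0), act2.mean(axis=0)
sigma1, sigma2 = cov(act1, rowvar=False), cov(act2, rowvar=False)

covariance_mean = sqrtm(sigma1.dot(sigma2))
if iscomplexobj(covariance_mean):
    covariance_mean = covariance_mean.real   # discard numerical imaginary part

fid = np.sum((mean1 - mean2)**2.0) + tr(sigma1 + sigma2 - 2.0 * covariance_mean)
print(round(fid, 2))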
Example #28
    def update(self, H, δt):
        """mixed gauge update (inverse free) as in verstraeten notes

        :param H: hamiltonian   
        :param δt: timestep
        """
        raise NotImplementedError('Not implemented properly yet')
        self.canonicalise('r')
        d, D = self.d, self.D
        h = H[0].reshape(d, d, d, d)
        A = self.data[0]
        _, l, r = self.eigs()

        C = ncon([h]+[A, A], [[-1, -2, 1, 2], [1, -3, 3], [2, 3, -4]]) # HAA
        K = ncon([l@A.conj(), A.conj()]+[C], [[1, 3, 4], [2, 4, -2], [1, 2, 3, -1]]) #AAHAA
        self.e = tr(K@r)

        pr = self.left_null_projector()

        Lh = self.Lh(H)
        Rh = self.Rh(H)

        AL, AR, C = self.mixed()
        AL, AR, C = AL[0], AR[0], C
        AC = AL@C

        G1 = -1j*zl(A)

    
        G1 += ncon([ncon([h]+[AC, AR], [[-1, -2, 1, 2], [1, -3, 3], [2, 3, -4]]),
                    c(AR)], [[-1, 2, -2, 1], [2, -3, 1]])
        G1 += ncon([ncon([h]+[AL, AC], [[-1, -2, 1, 2], [1, -3, 3], [2, 3, -4]]),
                    c(AL)], [[2, -1, 1, -3], [2, 1, -2]])

        G1 += AC@Rh
        G1 += Lh@AC

        #G2 = ncon([G1, c(AC)], [[2, 1, -1], [2, 1, -2] ])
        #print(tr(G2), self.e)
        #raise Exception

        G2 = ncon([G1, c(AL)], [[2, 1, -1], [2, 1, -2] ])

        print(C1.shape, C2.shape, C3.shape, C4.shape)
        raise Exception
Example #29
File: ppca.py Project: yochju/ppca
 def __ell(self, w, mu, sigma, norm=True):
     m = inv(tr(w).dot(w) + sigma * np.eye(w.shape[1]))
     mw = m.dot(tr(w))
     ll = 0.0
     for i in range(self.n):
         yi = self.y[:, i][:, np.newaxis]
         yyi = yi - mu
         xi = mw.dot(yyi)
         xxi = sigma * m + xi.dot(tr(xi))
         ll += 0.5 * np.trace(xxi)
         if sigma > 1e-5:
             ll += (2 * sigma)**-1 * float(tr(yyi).dot(yyi))
             ll -= sigma**-1 * float(tr(xi).dot(tr(w)).dot(yyi))
             ll += (2 * sigma)**-1 * np.trace(tr(w).dot(w).dot(xxi))
     if sigma > 1e-5:
         ll += 0.5 * self.n * self.p * np.log(sigma)
     ll *= -1.0
     if norm:
         ll /= float(self.n)
     return ll
Example #30
def crispness(alpha, T, right_eigenvectors, square_map, pi):
    """Return the crispness PCCA+ objective function.

    Parameters
    ----------
    alpha : ndarray
        Parameters of objective function (e.g. flattened A)
    T : csr sparse matrix
        Transition matrix
    right_eigenvectors : ndarray
        The right eigenvectors.
    square_map : ndarray
        Mapping from square indices (i,j) to flat indices (k).
    pi : ndarray
        Equilibrium Populations of transition matrix.

    Returns
    -------
    obj : float
        The objective function

    Notes
    -------
    Tries to make a crisp state decomposition. This function is
    defined in [3].
    """

    A, chi, mapping = calculate_fuzzy_chi(alpha, square_map,
                                          right_eigenvectors)

    # If current point is infeasible or leads to degenerate lumping.
    if (len(np.unique(mapping)) != right_eigenvectors.shape[1] or
            has_constraint_violation(A, right_eigenvectors)):
        return -1.0 * np.inf

    obj = tr(dot(diag(1. / A[0]), dot(A.transpose(), A)))

    return obj
Example #32
    def dA_dt(self, H):
        d, D = self.d, self.D
        h = H[0].reshape(d, d, d, d)
        A = self.data[0]
        _, l, r = self.eigs()

        C = ncon([h]+[A, A], [[-1, -2, 1, 2], [1, -3, 3], [2, 3, -4]]) # HAA

        K = ncon([l@A.conj(), A.conj()]+[C], [[1, 3, 4], [2, 4, -2], [1, 2, 3, -1]]) #AAHAA
        self.e = tr(K@r)

        pr = self.left_null_projector()

        R = ncon([pr, A], [[-3, -4, 1, -2], [1, -1, -5]])
        K = self.Lh(H)

        B = -1j*zl(A)
        B += -1j*ncon([l@C@r, pr, inv(r)@A.conj()], [[3, 4, 1, 2], [-1, -2, 3, 1], [4, -3, 2]])
        B += -1j*ncon([l@A.conj(), pr]+[C], [[1, 3, 4], [-1, -2, 2, 4], [1, 2, 3, -3]])
        B += -1j *ncon([K, R], [[1, 2], [1, 2, -1, -2, -3]])
        B = iMPS([B])
        B.l, B.r, B.vL = l, r, self.vL
        return B
Example #33
def hpd(x, alpha):
    """Calculate HPD (minimum width BCI) of array for given alpha"""

    # Make a copy of trace
    x = x.copy()

    # For multivariate node
    if x.ndim>1:

        # Transpose first, then sort
        tx = tr(x, list(range(x.ndim))[1:] + [0])
        dims = shape(tx)

        # Container list for intervals
        intervals = np.resize(0.0, dims[:-1]+(2,))

        for index in make_indices(dims[:-1]):

            try:
                index = tuple(index)
            except TypeError:
                pass

            # Sort trace
            sx = sort(tx[index])

            # Append to list
            intervals[index] = calc_min_interval(sx, alpha)

        # Transpose back before returning
        return array(intervals)

    else:
        # Sort univariate node
        sx = sort(x)

        return array(calc_min_interval(sx, alpha))
Example #34
means = numpy.average(samples.data, axis=0)
stds = numpy.std(samples.data, axis=0)
samples.data = (samples.data - means) / stds

#[weights,sigma2,x_mean] = ppca(samples.data,2)
#weights=[weights]
#sigma2=[sigma2]
#x_mean=[x_mean]
#gausian_weight=[1.]

[weights,sigma2,x_mean,gausian_weight] = mppca(samples.data)

m = []
for i in range(len(x_mean)):
    a = tr(weights[i]).dot(weights[i]) + float(sigma2[i]) * numpy.eye(weights[i].shape[1])
    m.append(numpy.linalg.inv(a))
targets = samples.target
norm_sum=0.
for i,x in enumerate(numpy.matrix(samples.data)):
    #new_sample = numpy.matrix(numpy.zeros([weights[0].shape[1],1]))
    #for j in range(len(x_mean)):
    #    new_sample += gausian_weight[j]*(numpy.linalg.inv(weights[j].T*weights[j])*weights[j].T*x.T)
    #new_sample2 = numpy.matrix(numpy.zeros([weights[0].shape[0],1]))
    #for j in range(len(x_mean)):
    #    new_sample2 += gausian_weight[j]*(weights[j]*new_sample)

    new_sample=weights[0]*numpy.linalg.inv(weights[0].T*weights[0])*weights[0].T*x.T
    new_sample2=weights[1]*numpy.linalg.inv(weights[1].T*weights[1])*weights[1].T*x.T
    new_sample3=weights[2]*numpy.linalg.inv(weights[2].T*weights[2])*weights[2].T*x.T
    #new_sample4=weights[3]*numpy.linalg.inv(weights[3].T*weights[3])*weights[3].T*x.T
Example #35
 def Wishart(old, new):
     #First get the expectation of the log of the determinant of rho:
     d = old['tau'].shape[1]
     i = np.arange(d)
     ER = -log(det(old['tau']) / 2) + sum(psi((old['alpha'] - i + 1)/2))
 
     #Now compute the overall KL divergence:
     return ((old['alpha'] - new['alpha']) / 2 * ER
             - old['alpha'] * d / 2
             + old['alpha'] / 2. * tr(new['tau'] * inv(old['tau']))
             + log(Z(new['alpha'], new['tau']))
             - log(Z(old['alpha'], old['tau'])))
Example #36
 def structure(self, val):
     self._structure = val
     self.bmat = tr(inv(val.cell))
     self.point_group = val.get_spacegroup(self.prec)["Number"]
     self._assign_path()
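bmat above is the reciprocal-lattice basis, the transposed inverse of the cell matrix; a quick property check (sketch, assuming the rows of cell are the direct lattice vectors):

import numpy as np

cell = np.array([[2.0, 0.0, 0.0],
                 [0.0, 3.0, 0.0],
                 [0.5, 0.0, 4.0]])
bmat = np.linalg.inv(cell).T
# reciprocal and direct vectors are biorthogonal: B A^T = I
assert np.allclose(bmat @ cell.T, np.eye(3))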
Example #37
 def update_x(self):
     q = self.q_dist
     gamma_mean = q.gamma_a / q.gamma_b
     q.x_cov = inv(np.eye(self.q) + gamma_mean * tr(q.w_mean).dot(q.w_mean))
     q.x_mean = gamma_mean * q.x_cov.dot(tr(q.w_mean)).dot(self.y - q.mu_mean[:, np.newaxis])
Example #38
        trK += Ktmp[counter, j]
        counter += 1

yK = np.array(yK).reshape(1, n_subj)

XX = dot(t(X), X)
XXinv = np.linalg.inv(XX)
Z = dot(X, XXinv)
yZ = dot(t(y), Z)
yPy = dot(t(y), y) - dot(dot(yZ, t(X)), y)

yZXK = dot(yZ, XK)
XKZ = dot(XK, Z)

yPKPy = dot(yK, y) - 2 * dot(yZXK, y) + dot(dot(yZXK, X), t(yZ))
trPK = trK - tr(XKZ)
trPKPK = trKK - 2 * tr(dot(dot(XXinv, XK), t(XK))) + tr(XKZ * XKZ)

S = np.array([trPKPK, trPK, trPK, n_subj - n_cov]).reshape(2, 2)
q = np.array([yPKPy, yPy]).reshape(2, 1)
Vc = dot(np.linalg.inv(S), q)

Vc[Vc < 0] = 0
s = trPKPK - trPK * trPK / (n_subj - n_cov)

h2 = float(max(min(Vc[0] / sum(Vc), 1), 0))
se = pow(2 / s, 0.5)
print(h2, se)

print("--- %s seconds ---" % (time.time() - start_time))
Example #39
def PPCA(Y_mat, d=20):
    """
       Implements probabilistic PCA for data with missing values,
       using a factorizing distribution over hidden states and hidden observations.
       Args:
           Y:   (N by D ) input numpy ndarray of data vectors
           d:   (  int  ) dimension of latent space
           dia: (boolean) if True: print objective each step
       Returns:
           ss: ( float ) isotropic variance outside subspace
           C:  (D by d ) C*C' + I*ss is covariance model, C has scaled principal directions as cols
           M:  (D by 1 ) data mean
           X:  (N by d ) expected states
           Ye: (N by D ) expected complete observations (differs from Y if data is missing)
           Based on MATLAB code from J.J. VerBeek, 2006. http://lear.inrialpes.fr/~verbeek
    """
    Y = Y_mat.copy()
    N, D = shape(
        Y
    )  # N observations in D dimensions (i.e. D is number of features, N is samples)
    threshold = 1E-4  # minimal relative change in objective function to continue
    hidden = isnan(Y)
    missing = hidden.sum()

    if (missing > 0):
        M = nanmean(Y, axis=0)
    else:
        M = average(Y, axis=0)

    Ye = Y - repmat(M, N, 1)

    if (missing > 0):
        Ye[hidden] = 0

    # initialize
    C = normal(loc=0.0, scale=1.0, size=(D, d))
    CtC = mm(C.T, C)
    X = mm(mm(Ye, C), inv(CtC))
    recon = mm(X, C.T)
    recon[hidden] = 0
    ss = np.sum((recon - Ye)**2) / (N * D - missing)

    count = 1
    old = np.inf

    # EM Iterations
    while (count):
        Sx = inv(eye(d) + CtC / ss)  # E-step, covariances
        ss_old = ss
        if (missing > 0):
            proj = mm(X, C.T)
            Ye[hidden] = proj[hidden]

        X = mm(mm(Ye, C), Sx / ss)  # E-step: expected values

        SumXtX = mm(X.T, X)  # M-step
        C = mm(mm(mm(Ye.T, X), (SumXtX + N * Sx).T),
               inv(mm((SumXtX + N * Sx), (SumXtX + N * Sx).T)))
        CtC = mm(C.T, C)
        ss = (np.sum((mm(X, C.T) - Ye)**2) + N * np.sum(CtC * Sx) +
              missing * ss_old) / (N * D)
        # rescale before taking the determinant to avoid over/underflow in
        # high dimensions: det(Sx) = min(Sx)**k * det(Sx / min(Sx))
        Sx_det = np.min(Sx).astype(np.float64)**shape(Sx)[0] * det(
            Sx / np.min(Sx))
        objective = N * D + N * (D * log(ss) + tr(Sx) - log(Sx_det)) + tr(
            SumXtX) - missing * log(ss_old)

        rel_ch = np.abs(1 - objective / old)
        old = objective

        count = count + 1
        if (rel_ch < threshold and count > 5):
            count = 0
        # if (dia == True):
        #     print('Objective: %.2f, Relative Change %.5f' % (objective, rel_ch))

    # C = orth(C)
    # covM = cov(mm(Ye, C).T)
    # vals, vecs = eig(covM)
    # ordr = np.argsort(vals)[::-1]
    # vals = vals[ordr]
    # vecs = vecs[:, ordr]

    # C = mm(C, vecs)
    # X = mm(Ye, C)

    # add data mean to expected complete data
    Ye = Ye + repmat(M, N, 1)

    # return C, ss, M, X, Ye
    return Ye
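A hypothetical end-to-end use of PPCA on data with missing entries (shapes and missing rate made up; assumes the module-level imports PPCA itself relies on, e.g. numpy's shape, isnan, nanmean and the repmat/mm aliases):

import numpy as np

rng = np.random.default_rng(0)
Y = rng.normal(size=(200, 10))              # N=200 samples, D=10 features
Y[rng.random(Y.shape) < 0.05] = np.nan      # knock out ~5% of the entries

Ye = PPCA(Y, d=3)                           # expected complete observations
assert not np.isnan(Ye).any()               # missing entries are imputed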