Code example #1
def _dpmm(coords, alpha, null_density, dof, prior_precision, prior_h0,
          subjects, sampling_coords=None, n_iter=1000, burnin=100,
          co_clust=False):
    """Apply the dpmm analysis to compute clusters from regions coordinates
    """
    from nipy.algorithms.clustering.imm import MixedIMM

    dim = coords.shape[1]
    migmm = MixedIMM(alpha, dim)
    migmm.set_priors(coords)
    migmm.set_constant_densities(
        null_dens=null_density, prior_dens=null_density)
    migmm._prior_dof = dof
    migmm._prior_scale = np.diag(prior_precision[0] / dof)
    migmm._inv_prior_scale_ = [np.diag(dof * 1. / (prior_precision[0]))]
    migmm.sample(coords, null_class_proba=prior_h0, niter=burnin, init=False,
                 kfold=subjects)

    # sampling
    like, pproba, co_clustering = migmm.sample(
        coords, null_class_proba=prior_h0, niter=n_iter, kfold=subjects,
        sampling_points=sampling_coords, co_clustering=True)

    if co_clust:
        return like, 1 - pproba, co_clustering
    else:
        return like, 1 - pproba
Code example #2
File: P3.py Project: MilosAtz/NE155
def flux_fdm(h,a,D,sig_a,S):
	# First, we determine the number of cells and points.
	n_cell = int((a-(-1*a))/h)
	n_points = n_cell+1
#####################################################################################################
# We want to set up the system Ax=b, where A is a tridiagonal matrix and x contains the flux at each point. Because S is constant, the vector b will be constant as well.
	b=np.zeros(n_cell-1)
	for i in range(0, n_cell-1):
		b[i]=S*h**2/D
	b=np.transpose(np.matrix(b))
#####################################################################################################
# A is made up out of the coefficients for flux; A is a tridiagonal matrix. The inputs a, b, and c allow for the input of those coefficients.
	A_a=[-1]*int(n_cell-2)
	A_b=[2+(sig_a*h**2/D)]*int(n_cell-1)
	A_c=[-1]*int(n_cell-2)
	A=np.matrix(np.diag(A_a, -1) + np.diag(A_b, 0) + np.diag(A_c, 1))
#####################################################################################################
# Utilize the Thomas method to solve the system of equations
	phi = [0]*n_points
	for i in range(1,n_cell-1):
		A[i,i] = A[i,i]-(A[i,i-1]/A[i-1,i-1])*A[i-1,i]
		b[i]=b[i]-(A[i,i-1]/A[i-1,i-1])*b[i-1]
	phi[n_cell-1]=b[n_cell-2]/A[n_cell-2,n_cell-2]
	for i in range(n_cell-3, -1, -1):
		phi[i+1]=(b[i]-A[i,i+1]*phi[i+2])/A[i,i]
	return(phi)
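
A quick way to exercise the solver is to run it on a small symmetric slab. The parameter values below are illustrative only (they are not taken from the NE155 project), and numpy is assumed to be imported as np, as in P3.py.

import numpy as np

phi = flux_fdm(h=0.5, a=4.0, D=1.0, sig_a=0.2, S=8.0)
print(len(phi))            # n_cell + 1 = 17 points spanning [-a, a]
print(phi[len(phi) // 2])  # flux at the centre of the slab (the maximum for a uniform source)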
Code example #3
File: test_affines.py Project: Jan-Schreiber/nibabel
def test_append_diag():
    # Routine for appending diagonal elements
    assert_array_equal(append_diag(np.diag([2,3,1]), [1]),
                       np.diag([2,3,1,1]))
    assert_array_equal(append_diag(np.diag([2,3,1]), [1,1]),
                       np.diag([2,3,1,1,1]))
    aff = np.array([[2,0,0],
                    [0,3,0],
                    [0,0,1],
                    [0,0,1]])
    assert_array_equal(append_diag(aff, [5], [9]),
                       [[2,0,0,0],
                        [0,3,0,0],
                        [0,0,0,1],
                        [0,0,5,9],
                        [0,0,0,1]])
    assert_array_equal(append_diag(aff, [5,6], [9,10]),
                       [[2,0,0,0,0],
                        [0,3,0,0,0],
                        [0,0,0,0,1],
                        [0,0,5,0,9],
                        [0,0,0,6,10],
                        [0,0,0,0,1]])
    aff = np.array([[2,0,0,0],
                    [0,3,0,0],
                    [0,0,0,1]])
    assert_array_equal(append_diag(aff, [5], [9]),
                       [[2,0,0,0,0],
                        [0,3,0,0,0],
                        [0,0,0,5,9],
                        [0,0,0,0,1]])
    # Length of starts has to match length of steps
    assert_raises(ValueError, append_diag, aff, [5,6], [9])
Code example #4
File: svm.py Project: dabbabi-zayani/Python-SVM
    def _compute_multipliers(self, X, y):
        n_samples, n_features = X.shape

        K = self._gram_matrix(X)
        # Solves
        # min 1/2 x^T P x + q^T x
        # s.t.
        #  Gx \coneleq h
        #  Ax = b

        P = cvxopt.matrix(np.outer(y, y) * K)
        q = cvxopt.matrix(-1 * np.ones(n_samples))

        # -a_i \leq 0
        # TODO(tulloch) - modify G, h so that we have a soft-margin classifier
        G_std = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
        h_std = cvxopt.matrix(np.zeros(n_samples))

        # a_i \leq c
        G_slack = cvxopt.matrix(np.diag(np.ones(n_samples)))
        h_slack = cvxopt.matrix(np.ones(n_samples) * self._c)

        G = cvxopt.matrix(np.vstack((G_std, G_slack)))
        h = cvxopt.matrix(np.vstack((h_std, h_slack)))

        A = cvxopt.matrix(y, (1, n_samples))
        b = cvxopt.matrix(0.0)

        solution = cvxopt.solvers.qp(P, q, G, h, A, b)

        # Lagrange multipliers
        return np.ravel(solution['x'])
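
For concreteness, this is what the stacked inequality G a <= h built above encodes for a toy case of two samples and C = 1.0; the snippet is a standalone sketch, not part of svm.py.

import numpy as np

n_samples, c = 2, 1.0
G_std, h_std = np.diag(-np.ones(n_samples)), np.zeros(n_samples)        # -a_i <= 0
G_slack, h_slack = np.diag(np.ones(n_samples)), c * np.ones(n_samples)  #  a_i <= c
G, h = np.vstack((G_std, G_slack)), np.hstack((h_std, h_slack))
print(G)   # [[-1  0] [ 0 -1] [ 1  0] [ 0  1]]
print(h)   # [0. 0. 1. 1.]  ->  together: 0 <= a_i <= c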
Code example #5
File: RBFKernel.py Project: LinZhineng/atldgp
    def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        if hyperno != 0:
            return EVzxVzxT_list_this * 0

        alpha = self.length_scale * self.length_scale

        I = np.identity(R)
        S = np.diag(B[0, :] * B[0, :])
        Sinv = np.diag(1.0 / (B[0, :] * B[0, :]))  # inverse of S = diag(B**2); the parentheses matter
        C = I * alpha
        Cinv = I * (1 / alpha)
        CinvSinv = 2 * Cinv + Sinv
        CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

        dC = self.length_scale * I
        dCinv = -Cinv.dot(dC).dot(Cinv)
        dCinvSinv = 2 * dCinv
        dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

        S1 = (
            dCinv
            - dCinv.dot(CinvSinv_inv).dot(Cinv)
            - Cinv.dot(dCinvSinv_inv).dot(Cinv)
            - Cinv.dot(CinvSinv_inv).dot(dCinv)
        )
        S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
        S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
        S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

        T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
        T1 = np.tile(T1s, [N, 1, 1])
        T2s = T1s.T
        T2 = np.tile(T2s, [N, 1, 1])
        T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
        T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
        T4 = np.expand_dims(T4, axis=2)
        T4 = np.repeat(T4, P, axis=2)
        T5 = A.dot(S3).dot(Z.T)
        T5 = np.expand_dims(T5, axis=2)
        T5 = np.repeat(T5, P, axis=2)
        T6 = np.swapaxes(T5, 1, 2)

        SCinvI = 2 * Cinv.dot(S) + I
        SCinvI_inv = np.diag(1 / SCinvI.diagonal())
        (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
        detSCinvI = np.exp(logDetSCinvI)
        dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

        expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

        res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm

        res = np.sum(res, axis=0)

        return res
Code example #6
File: hopfield.py Project: team-hdnet/hdnet
 def objective_gradient(self, X, J=None, return_K=False):
     """
     Computes MPF objective gradient on input data X given coupling
     strengths J.
     
     Parameters
     ----------
     X : numpy array
         (M, N)-dim array of binary input patterns of length N,
         where N is the number of nodes in the network
     J : numpy array, optional
         Coupling matrix of size N x N, where N denotes the number
         of nodes in the network (default None)
     return_K : bool, optional
          Flag whether to return K (default False)
     
     Returns
     -------
      dJ or (K, dJ) : numpy array or (float, numpy array)
          Update to the coupling matrix J; if return_K is True, the objective
          value K is returned first, as (K, dJ)
     """
     if J is None:
         J = self._J
         J[np.eye(self._N, dtype=bool)] = -2 * self._theta
     X = np.atleast_2d(X)
     M, N = X.shape
     S = 2 * X - 1
     Kfull = np.exp(-S * np.dot(X, J.T) + .5 * np.diag(J)[None, :])
     dJ = -np.dot(X.T, Kfull * S) + .5 * np.diag(Kfull.sum(0))
     if self._symmetric is True:
         dJ = .5 * (dJ + dJ.T)
     if return_K:
         return Kfull.sum() / M, dJ / M
     else:
         return dJ / M
Code example #7
File: score.py Project: sinianyutian/keras-fcn-1
def mean_IU(y_true, y_pred):
    """Compute mean IoU."""
    confusion = compute_error_matrix(y_true, y_pred)
    # per-class IU
    iu = np.diag(confusion) / (confusion.sum(1) + confusion.sum(0)
                               - np.diag(confusion))
    return np.nanmean(iu)
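
The per-class IoU expression can be checked by hand on a small confusion matrix; compute_error_matrix is assumed to return exactly such a matrix (rows = ground truth, columns = prediction), so the sketch below builds one directly.

import numpy as np

confusion = np.array([[5, 1, 0],
                      [2, 7, 1],
                      [0, 0, 4]])
iu = np.diag(confusion) / (confusion.sum(1) + confusion.sum(0) - np.diag(confusion))
print(iu)              # class 0: 5 / (6 + 7 - 5) = 0.625, and so on
print(np.nanmean(iu))  # mean IoU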
Code example #8
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.
    
    `a` and `b` are (m, 1) and (n, 1) rank-1 matrices, so that svdUpdate can simulate 
    incremental addition of one new document and/or term to an already existing 
    decomposition.
    """
    rank = U.shape[1]
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = (1.0 / float(Ra)) * p
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = (1.0 / float(Rb)) * q

    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat("U P") * tUp
    Vp = numpy.bmat("V Q") * tVp
    Sp = tSp
    return Up, Sp, Vp
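
A useful sanity check is to update the exact SVD of a low-rank matrix and compare the result against a fresh SVD of X + a*b^T. The snippet below is a sketch along those lines, using numpy.matrix objects to match the function's conventions; the sizes and seed are arbitrary.

import numpy

numpy.random.seed(0)
r = 3
X = numpy.matrix(numpy.random.rand(6, r)) * numpy.matrix(numpy.random.rand(r, 5))  # exactly rank 3
u, s, vt = numpy.linalg.svd(X, full_matrices=False)
U = numpy.matrix(u[:, :r])
S = numpy.matrix(numpy.diag(s[:r]))
V = numpy.matrix(vt[:r, :].T)
a = numpy.matrix(numpy.random.rand(6, 1))
b = numpy.matrix(numpy.random.rand(5, 1))

Up, Sp, Vp = svdUpdate(U, S, V, a, b)
# the update keeps rank r, so its singular values should equal the leading
# singular values of the explicitly updated matrix
print(numpy.allclose(numpy.diag(Sp), numpy.linalg.svd(X + a * b.T)[1][:r]))  # True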
Code example #9
def transfer_f(dw,aas,aai,eps,deltaw,f):
    """
    Args:
    dw: size of the grid spacing
    aas: relative slowness of the signal mode
    aai: relative slowness of the idler mode
    eps: strength of the nonlinearity
    deltaw:  specifies the size of the frequency grid going from
    -deltaw to deltaw for each frequency
    f: shape of the pump function
    """
    ddws=np.arange(-deltaw-dw/2,deltaw+dw/2,dw)
    deltaks=aas*ddws
    ddwi=np.arange(-deltaw-dw/2,deltaw+dw/2,dw)
    deltaki=aai*ddwi
    ds=np.diag(deltaks)
    di=np.diag(deltaki)


    def ff(x,y):
        return f(x+y)
    
    v=eps*(dw)*ff(ddwi[:,None],ddws[None,:])
    G=1j*np.concatenate((np.concatenate((ds,v),axis=1),np.concatenate((-v,-di),axis=1)),axis=0)
    z=1;
    dsi=np.concatenate((deltaks,-deltaki),axis=0)
    U0=linalg.expm(-1j*np.diag(dsi)*z/2)
    GG=np.dot(np.dot(U0,linalg.expm(G)),U0)
    n=len(ddws)
    return (GG[0:n,0:n],GG[n:2*n,0:n],GG[0:n,n:2*n],GG[n:2*n,n:2*n])
Code example #10
File: crf.py Project: cshen/pystruct
    def psi(self, x, y):
        # x is unaries
        # y is a labeling
        ## unary features:
        gx, gy = np.ogrid[:x.shape[0], :x.shape[1]]
        selected_unaries = x[gx, gy, y]
        unaries_acc = np.bincount(y.ravel(), selected_unaries.ravel(),
                minlength=self.n_states)

        ##accumulated pairwise
        #make one hot encoding
        labels = np.zeros((y.shape[0], y.shape[1], self.n_states),
                dtype=int)
        gx, gy = np.ogrid[:y.shape[0], :y.shape[1]]
        labels[gx, gy, y] = 1
        # vertical edges
        vert = np.dot(labels[1:, :, :].reshape(-1, self.n_states).T,
                labels[:-1, :, :].reshape(-1, self.n_states))
        # horizontal edges
        horz = np.dot(labels[:, 1:, :].reshape(-1, self.n_states).T, labels[:,
            :-1, :].reshape(-1, self.n_states))
        pw = vert + horz
        pw = pw + pw.T - np.diag(np.diag(pw))
        feature = np.hstack([unaries_acc, pw[np.tri(self.n_states,
            dtype=bool)]])
        return feature
Code example #11
File: test_data.py Project: dengemann/statsmodels
 def test_extra_kwargs_2d(self):
     sigma = np.random.random((25, 25))
     sigma = sigma + sigma.T - np.diag(np.diag(sigma))
     data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
     idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
     sigma = sigma[idx][:,idx]
     np.testing.assert_array_equal(data.sigma, sigma)
Code example #12
def get_eq_from_eig(m):   
    ''' get the equilibrium frequencies from the matrix. the eq freqs are the left eigenvector corresponding to eigenvalue of 0. 
        Code here is largely taken from Bloom. See here - https://github.com/jbloom/phyloExpCM/blob/master/src/submatrix.py, specifically in the fxn StationaryStates
    '''
    (w, v) = linalg.eig(m, left=True, right=False)
    max_i = 0
    max_w = w[max_i]
    for i in range(1, len(w)):
        if w[i].real > max_w.real:  # eigenvalues may be complex; compare real parts
            max_w = w[i]
            max_i = i
    assert( abs(max_w) < ZERO ), "Maximum eigenvalue is not close to zero."
    max_v = v[:,max_i]
    max_v /= np.sum(max_v)
    eq_freqs = max_v.real # these are the stationary frequencies
    
    # SOME SANITY CHECKS
    assert np.allclose(np.zeros(61), np.dot(eq_freqs, m)) # should be true since eigenvalue of zero
    pi_inv = np.diag(1.0 / eq_freqs)
    s = np.dot(m, pi_inv)
    assert np.allclose(m, np.dot(s, np.diag(eq_freqs)), atol=ZERO, rtol=1e-5), "exchangeability and equilibrium does not recover matrix"
    
    # And for some impressive overkill, double check pi_i*q_ij = pi_j*q_ji
    for i in range(61):
        pi_i = eq_freqs[i]
        for j in range(61):
            pi_j = eq_freqs[j]
            forward  = pi_i * m[i][j] 
            backward = pi_j * m[j][i]
            assert(abs(forward - backward) < ZERO), "Detailed balance violated."    
    return eq_freqs
Code example #13
 def testImplicitLargeDiag(self):
   mu = np.array([[1., 2, 3],
                  [11, 22, 33]])      # shape: [b, k] = [2, 3]
   u = np.array([[[1., 2],
                  [3, 4],
                  [5, 6]],
                 [[0.5, 0.75],
                  [1, 0.25],
                  [1.5, 1.25]]])      # shape: [b, k, r] = [2, 3, 2]
   m = np.array([[0.1, 0.2],
                 [0.4, 0.5]])         # shape: [b, r] = [2, 2]
   scale = np.stack([
       np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
                             np.transpose(u[0])),
       np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
                             np.transpose(u[1])),
   ])
   cov = np.stack([np.matmul(scale[0], scale[0].T),
                   np.matmul(scale[1], scale[1].T)])
   logging.vlog(2, "expected_cov:\n{}".format(cov))
   with self.test_session():
     mvn = ds.MultivariateNormalDiagPlusLowRank(
         loc=mu,
         scale_perturb_factor=u,
         scale_perturb_diag=m)
     self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
Code example #14
File: test_construct.py Project: 7924102/scipy
    def test_diags_vs_diag(self):
        # Check that
        #
        #    diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ...
        #

        np.random.seed(1234)

        for n_diags in [1, 2, 3, 4, 5, 10]:
            n = 1 + n_diags//2 + np.random.randint(0, 10)

            offsets = np.arange(-n+1, n-1)
            np.random.shuffle(offsets)
            offsets = offsets[:n_diags]

            diagonals = [np.random.rand(n - abs(q)) for q in offsets]

            mat = construct.diags(diagonals, offsets)
            dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)])

            assert_array_almost_equal_nulp(mat.todense(), dense_mat)

            if len(offsets) == 1:
                mat = construct.diags(diagonals[0], offsets[0])
                dense_mat = np.diag(diagonals[0], offsets[0])
                assert_array_almost_equal_nulp(mat.todense(), dense_mat)
Code example #15
    def setUp(self):

        # Generate simple noiseless dataset

        self.nObservations = 10
        self.nParameters = 2
        self.time = np.arange(self.nObservations, dtype=np.double)
        self.regressorList = [np.ones_like(self.time), self.time]
        self.unweightedDesignMatrix = np.column_stack(self.regressorList)
        self.regressorNames = ["1", "t"]

        # First covariance matrix: different weights, no correlations

        self.covMatrixObserv1 = np.diag(0.1 + 0.1 * np.arange(self.nObservations))

        # Second covariance matrix: different weights and correlations
        # Correlation coefficient: 0.6

        self.covMatrixObserv2 = np.diag(0.1 + 0.1 * np.arange(self.nObservations))
        for i in range(self.nObservations):
            for j in range(self.nObservations):
                if i >= j:
                    continue
                self.covMatrixObserv2[i, j] = 0.6 * sqrt(self.covMatrixObserv2[i, i] * self.covMatrixObserv2[j, j])
                self.covMatrixObserv2[j, i] = self.covMatrixObserv2[i, j]
Code example #16
File: jacobi.py Project: terasakisatoshi/PythonCode
def main():
    n=10
    # prepare matrix a
    a=np.diag([5.]*n)
    a+=np.diagflat([2.]*(n-1),1)
    a+=np.diagflat([2.]*(n-1),-1)
    print(a)

    b=np.array([3,1,4,0,5,-1,6,-2,7,-15],dtype='f').T
    #initial value x
    x=np.ones(10).T

    D=np.diag(np.diag(a))
    L=np.tril(a,-1)
    U=np.triu(a,+1)
    M= -np.linalg.inv(D) @ (L+U)
    N=np.linalg.inv(D)
    for k in range(maxiteration):
        x_new=M @ x + N @ b
        if(np.linalg.norm(x_new-x) <epsilon):
            break
        x=x_new
    else:
        print("fail jacobi method ...")
        exit(1)

    print("the sol of ax = b using Jacobi method is \n{}".format(x))
    print("iteration {} times".format(k))
    print("indeed ax-b is \n{}".format(a @x -b))
    print("you can check sol x using sympy...")
    sy_a=sy.Matrix(a)
    sy_x = sy_a.solve(sy.Matrix(b))  # sympy expects a Matrix right-hand side
    print(sy_x)
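
The excerpt relies on a few module-level names from jacobi.py (the numpy and sympy imports and the iteration parameters). A minimal preamble that lets it run standalone could look like this; the tolerance and iteration cap are illustrative values, not necessarily the project's.

import numpy as np
import sympy as sy

epsilon = 1e-10       # convergence tolerance for the Jacobi iteration
maxiteration = 1000   # iteration cap before giving up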
Code example #17
File: fastica.py Project: derek-r/scikit-learn
def _ica_par(X, tol, g, gprime, fun_args, max_iter, w_init):
    """Parallel FastICA.

    Used internally by FastICA --main loop

    """
    n, p = X.shape

    W = _sym_decorrelation(w_init)

    # we set lim to tol+1 to be sure to enter at least once in next while
    lim = tol + 1
    it = 0
    while ((lim > tol) and (it < (max_iter - 1))):
        wtx = np.dot(W, X)
        gwtx = g(wtx, fun_args)
        g_wtx = gprime(wtx, fun_args)
        W1 = np.dot(gwtx, X.T) / float(p) \
             - np.dot(np.diag(g_wtx.mean(axis=1)), W)

        W1 = _sym_decorrelation(W1)

        lim = max(abs(abs(np.diag(np.dot(W1, W.T))) - 1))
        W = W1
        it += 1

    return W
Code example #18
File: jac.py Project: liubenyuan/pyEIT
def h_matrix(Jac, p, lamb, method='kotre'):
    """
    JAC method of dynamic EIT solver:
        H = (J.T*J + lamb*R)^(-1) * J.T

    Parameters
    ----------
    Jac : NDArray
        Jacobian
    p, lamb : float
        regularization parameters
    method : str, optional
        regularization method

    Returns
    -------
    NDArray
        pseudo-inverse matrix of JAC
    """
    JWJ = np.dot(Jac.transpose(), Jac)
    if method == 'kotre':
        # see adler-dai-lionheart-2007, when
        # p=0   : noise distribute on the boundary
        # p=0.5 : noise distribute on the middle
        # p=1   : noise distribute on the center
        R = np.diag(np.diag(JWJ) ** p)
    else:
        # Marquardt–Levenberg, 'lm'
        R = np.eye(Jac.shape[1])

    # build H
    H = np.dot(la.inv(JWJ + lamb*R), Jac.transpose())
    return H
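
As a rough usage sketch, with a random Jacobian standing in for a real EIT forward model and assuming the module-level aliases np and la from jac.py, the regularised pseudo-inverse maps a boundary-voltage difference to a conductivity update:

import numpy as np
import numpy.linalg as la

Jac = np.random.rand(64, 200)         # 64 measurements x 200 mesh elements (toy sizes)
H = h_matrix(Jac, p=0.5, lamb=1e-3)   # Kotre regularization by default
dv = np.random.rand(64)               # simulated difference measurement
ds = np.dot(H, dv)                    # conductivity change on the mesh
print(H.shape, ds.shape)              # (200, 64) (200,)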
Code example #19
File: xbee_bridge_state.py Project: fatadama/CSCE635
 def update(self,tNow,lon_int,lat_int,t,vel,h):
     # Compute how long it's been since the system updated
     dtReal = tNow - self.tLast
     # rejection criteria
     if self.gpsState.ready and (abs( 1.0e-7*float(lon_int)-self.gpsState.lon ) > 0.01 or abs( 1.0e-7*float(lat_int)-self.gpsState.lat ) > 0.01):
         self.tLast = tNow
         return
     if self.gpsState.ready==False:
         self.gpsState.update(lon_int,lat_int,t,vel,h)
         # initialize the filter
         xk0 = np.array([self.gpsState.x,self.gpsState.y,0.0,0.0,0.0,0.0]) # initial state
         Pk0 = np.diag([math.pow(filter_dynamics.sigma_gps,2.0),math.pow(filter_dynamics.sigma_gps,2.0),1.0,1.0,1.0,1.0]) # initial covariance
         self.EKF.init_P(xk0,Pk0,t)
     else:
         # update the raw GPS object
         self.gpsState.update(lon_int,lat_int,t,vel,h)
         # test that dt is not negative
         dt = t-self.EKF.t
         if dt>0 and dt<10.0*max([dtReal,1.0]):
             # propagate the filter to the current time
             self.EKF.propagateOde(dt)
             # update the filter
             self.EKF.update(t,np.array([self.gpsState.x,self.gpsState.y]),filter_dynamics.measurement,filter_dynamics.measurementGradient,filter_dynamics.Rkin)
         else:
             print("Reject for back in time: dt = %g, dtReal=%g" % (dt,dtReal))
             pass
     # if the filter state matches the reading well enough, use it
     '''
     if math.sqrt( np.sum(np.power(self.EKF.xhat[0:2]-np.array([self.gpsState.x,self.gpsState.y]),2.0)) ) < 10.0:
         # copy the filter state to local
         self.filterState[0:2] = self.EKF.xhat[0:2].copy()
         self.filterState[2] = np.sqrt( np.sum(np.power(self.EKF.xhat[2:4],2.0)) )
         # If we're moving, use the velocity to approximate the heading; else, use the GPS heading
         if self.filterState[2] > 1.0:
             self.filterState[3] = np.arctan2( self.EKF.xhat[3],self.EKF.xhat[2] )
         else:
             self.filterState[3] = self.gpsState.hdg
     else:
         self.filterState[0] = self.gpsState.x
         self.filterState[1] = self.gpsState.y
         self.filterState[2] = self.gpsState.v
         self.filterState[3] = self.gpsState.hdg
     '''
     self.filterState[0] = self.gpsState.x
     self.filterState[1] = self.gpsState.y
     self.filterState[2] = self.gpsState.v
     self.filterState[3] = self.gpsState.hdg
     # Debug test print of state
     #print("%12.7g,%8.4g,%8.4g" % (tNow,self.filterState[2],self.filterState[3]))
     # reset the filter if things look bad
     # are the covariance diagonals zero or nan?
     if (self.EKF.Pk[0,0]==0.0) or (self.EKF.Pk[1,1]==0.0) or (self.EKF.Pk[2,2]==0.0) or (self.EKF.Pk[3,3]==0.0) or (self.EKF.Pk[4,4]==0.0) or (self.EKF.Pk[5,5]==0.0) or (np.any(np.isnan(np.diag(self.EKF.Pk)))):
         # initialize the filter
         xk0 = np.array([self.gpsState.x,self.gpsState.y,0.0,0.0,0.0,0.0]) # initial state
         Pk0 = np.diag([math.pow(filter_dynamics.sigma_gps,2.0),math.pow(filter_dynamics.sigma_gps,2.0),1.0,1.0,1.0,1.0]) # initial covariance
         self.EKF.init_P(xk0,Pk0,t)
     # call the log
     self.logFun(t,tNow)
     # update the time tracker
     self.tLast = tNow
Code example #20
def lombscargle(ages, signal, ofac=4, hifac=1):
    r"""Calculates Lomb-Scargle Periodogram.

    Enter `signal` at times `ages` to compute the periodogram, with
    oversampling factor `ofac` and up to frequencies `hifac` * Nyquist.

    Return frequencies considered `freq`, the associated spectral `power`,
    and estimated significance of the power values `prob`.

    Note: the significance returned is the false alarm probability of the null
    hypothesis, i.e. that the data is composed of independent Gaussian random
    variables.  Low probability values indicate a high degree of significance
    in the associated periodic signal."""

    N, T = len(signal), ages.ptp()

    # Mean and variance.
    mu, s2 = signal.mean(), signal.var()

    # Calculate sampling frequencies.
    start = 1.0 / (T * ofac)
    stop = hifac * N / (2.0 * T)
    dt = 1.0 / (T * ofac)  # Interval for the frequencies.  Can be tweaked.
    freq = np.arange(start, stop + dt, dt)

    # Angular frequencies and constant offsets.
    w = 2.0 * np.pi * freq
    dot = np.dot(w[:, None], ages[None, :])
    A = np.sum(np.sin(2.0 * dot), axis=1)
    B = np.sum(np.cos(2.0 * dot), axis=1)
    tau = np.arctan2(A, B) / (2.0 * w)

    # Spectral power.
    cterm = np.cos(dot - (w * tau)[:, None])
    sterm = np.sin(dot - (w * tau)[:, None])

    ry = (np.sum(np.dot(cterm, np.diag(signal - mu)), axis=1) ** 2.0 /
          np.sum(cterm ** 2, axis=1))
    iy = (np.sum(np.dot(sterm, np.diag(signal - mu)), axis=1) ** 2.0 /
          np.sum(sterm ** 2, axis=1))

    # TODO: Phase (untested!)
    phLS = np.arctan2(ry, iy)

    power = ry + iy  # sum of the cosine and sine terms computed above

    power /= (2.0 * s2)

    # Estimate of the number of independent frequencies.
    M = 2.0 * len(freq) / ofac

    # Statistical significance of the power.
    prob = M * np.exp(-power)
    inds = prob > 0.01
    prob[inds] = 1.0 - (1.0 - np.exp(-power[inds])) ** M

    return freq, power, prob, phLS
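
A minimal smoke test, assuming only numpy: sample a noisy sinusoid at irregular times and check that the periodogram peaks near the true frequency.

import numpy as np

rng = np.random.default_rng(0)
ages = np.sort(rng.uniform(0, 100, 200))       # irregular sampling times
signal = np.sin(2 * np.pi * 0.1 * ages) + 0.1 * rng.standard_normal(ages.size)
freq, power, prob, ph = lombscargle(ages, signal)
print(freq[np.argmax(power)])                  # expect a value close to 0.1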
Code example #21
File: multiview.py Project: dakotabenjamin/OpenSfM
def KRt_from_P(P):
    '''Factorize the camera matrix into K,R,t as P = K[R|t].

    >>> K = np.array([[1, 2, 3],
    ...               [0, 4, 5],
    ...               [0, 0, 1]])
    >>> R = np.array([[ 0.57313786, -0.60900664,  0.54829181],
    ...               [ 0.74034884,  0.6716445 , -0.02787928],
    ...               [-0.35127851,  0.42190588,  0.83582225]])
    >>> t = np.array([1, 2, 3])
    >>> P = P_from_KRt(K, R, t)
    >>> KK, RR, tt = KRt_from_P(P)
    >>> np.allclose(K, KK)
    True
    >>> np.allclose(R, RR)
    True
    >>> np.allclose(t, tt)
    True
    '''
    K, R = rq(P[:, :3])

    T = np.diag(np.sign(np.diag(K))) # ensure K has positive diagonal
    K = np.dot(K, T)
    R = np.dot(T, R)
    t = np.linalg.solve(K, P[:,3])
    if np.linalg.det(R) < 0:         # ensure det(R) = 1
        R = -R
        t = -t
    K /= K[2, 2]                     # normalise K

    return K, R, t
Code example #22
File: test_olse.py Project: gelman/ep-stan
def random_cov(d, diff=None):
    """Generate random covariance matrix.
    
    Generates a random covariance matrix, or two dependent covariance matrices
    if the argument `diff` is given.
    
    """
    S = 0.8*np.random.randn(d,d)
    copy_triu_to_tril(S)
    np.fill_diagonal(S,0)
    mineig = linalg.eigvalsh(S, eigvals=(0,0))[0]
    drand = 0.8*np.random.randn(d)
    if mineig < 0:
        S += np.diag(np.exp(drand)-mineig)
    else:
        S += np.diag(np.exp(drand))
    if not diff:
        return S.T
    S2 = S * np.random.randint(2, size=(d,d))*np.exp(diff*np.random.randn(d,d))
    copy_triu_to_tril(S2)
    np.fill_diagonal(S2,0)
    mineig = linalg.eigvalsh(S2, eigvals=(0,0))[0]
    drand += diff*np.random.randn(d)
    if mineig < 0:
        S2 += np.diag(np.exp(drand)-mineig)
    else:
        S2 += np.diag(np.exp(drand))
    return S.T, S2.T
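
Given the helpers the function depends on (copy_triu_to_tril and the module's scipy.linalg import), a quick property check is that the result is a symmetric positive-definite matrix:

import numpy as np

S = random_cov(4)
print(np.allclose(S, S.T))                # symmetric
print(np.all(np.linalg.eigvalsh(S) > 0))  # positive definite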
Code example #23
File: SynGraphL2.py Project: lucasant10/Twitter
def genGraph(S_actual, S_est, S_previous, empCov_set, nodeID, e1, e2, e3, e4, display = False):
    D = np.where(S_est != 0)[0].shape[0]
    T = np.where(S_actual != 0)[0].shape[0]
    TandD = float(np.where(np.logical_and(S_actual,S_est) == True)[0].shape[0])
    P = TandD/D
    R = TandD/T
    offDiagDiff = S_actual - S_est
    offDiagDiff = offDiagDiff - np.diag(np.diag(offDiagDiff))
    S_diff = (S_est - S_previous)  
    S_diff = S_diff - np.diag(np.diag(S_diff))
    ind = (S_diff < 1e-2) & (S_diff > - 1e-2)
    S_diff[ind] = 0    
    K = np.count_nonzero(S_diff)
    e1.append( alg.norm(offDiagDiff, 'fro'))
    e2.append(2* P*R/(P+R))
    
    
    K = float(np.where(np.logical_and((S_est>0) != (S_previous>0), S_est>0) == True)[0].shape[0])
    e3.append(-np.log(alg.det(S_est)) + np.trace(np.dot(S_est, empCov_set[nodeID])) + K)
    e4.append(alg.norm(S_est -  S_previous, 'fro'))
    
    display = False
    if display == True:
        if (nodeID >timeShift -10) and (nodeID < timeShift + 10):
            print('nodeID = ', nodeID)
            print('S_true = ', S_actual, '\nS_est', S_est)
#            print('S_error = ', S_actual - S_est, '\n its Fro error = ', alg.norm(S_actual - S_est, 'fro'))
            print('D = ', D, 'T = ', T, 'TandD = ', TandD, 'K = ', K, 'P = ', P, 'R = ', R, 'Score = ', 2*P*R/(P+R))
            
    return e1, e2, e3, e4
Code example #24
def integration_recruitment(MA, S):
    '''
    Input Module-Allegiance "MA" and community structure "S"
    Output Integration and Recruitment
    '''


    # transform S to a column vector

    if min(S) == 1:
        S = S-1
    if np.shape(S)[0] == 1:
        S = S.T
    MA = np.double(MA)
    num_node = len(S)
    num_cl = max(S)+1
    H = np.zeros(shape=(num_node, num_cl), dtype = np.double)
    for i in range(num_cl):
        H[:,i] = (S==i)
    D_H = (H.T).dot(H)

    recruitment = np.zeros(shape = (num_cl, num_cl))
    integration = np.zeros(shape = (num_cl, num_cl))

    D_H_Inv = linalg.inv(D_H)
    recruitment = D_H_Inv.dot(H.T).dot(MA).dot(H).dot(D_H_Inv)
    d = np.diag(recruitment)
    D_Inv_Sqr = np.diag(np.power(d, -0.5))   # diagonal scaling by 1 / sqrt(recruitment_ii)
    integration = D_Inv_Sqr.dot(recruitment).dot(D_Inv_Sqr)
    return (integration,recruitment)
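
A toy module-allegiance matrix with two planted communities illustrates the outputs; the snippet assumes the module's linalg refers to numpy.linalg (scipy.linalg would behave the same here).

import numpy as np
from numpy import linalg

MA = np.array([[1.0, 0.9, 0.1, 0.0],
               [0.9, 1.0, 0.2, 0.1],
               [0.1, 0.2, 1.0, 0.8],
               [0.0, 0.1, 0.8, 1.0]])
S = np.array([0, 0, 1, 1])
integration, recruitment = integration_recruitment(MA, S)
print(recruitment)   # large diagonal entries: strong within-community recruitment
print(integration)   # small off-diagonal entries: weak between-community integration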
Code example #25
def Haffine_from_points(fp, tp):
    '''Compute the homography H of the affine transform such that tp is obtained from fp by an affine transformation'''
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')

    # condition (normalize) the points
    # map the source points
    m = numpy.mean(fp[:2], axis=1)
    maxstd = numpy.max(numpy.std(fp[:2], axis=1)) + 1e-9
    C1 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C1[0, 2] = -m[0] / maxstd
    C1[1, 2] = -m[1] / maxstd
    fp_cond = numpy.dot(C1, fp)

    # map the corresponding target points
    m = numpy.mean(tp[:2], axis=1)
    maxstd = numpy.max(numpy.std(tp[:2], axis=1)) + 1e-9
    C2 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C2[0, 2] = -m[0] / maxstd
    C2[1, 2] = -m[1] / maxstd
    tp_cond = numpy.dot(C2, tp)

    # after conditioning the points have zero mean, so the translation is zero
    A = numpy.concatenate((fp_cond[:2], tp_cond[:2]), axis=0)
    U, S, V = numpy.linalg.svd(A.T)
    # build the matrices B and C
    tmp = V[:2].T
    B = tmp[:2]
    C = tmp[2:4]

    tmp2 = numpy.concatenate((numpy.dot(C, numpy.linalg.pinv(B)), numpy.zeros((2, 1))), axis=1)
    H = numpy.vstack((tmp2, [0, 0, 1]))

    H = numpy.dot(numpy.linalg.inv(C2), numpy.dot(H, C1))  # undo the conditioning
    return H / H[2, 2]  # normalize and return
Code example #26
File: weights.py Project: slinderman/graphistician
    def initialize_hypers(self, W):
        mu_0 = W.mean(axis=(0,1))
        sigma_0 = np.diag(W.var(axis=(0,1)))

        # Set the global cov
        nu_0 = self._cov_model.nu_0
        self._cov_model.sigma_0 = sigma_0 * (nu_0 - self.B - 1)

        # Set the mean
        for c1 in range(self.C):
            for c2 in range(self.C):
                self._gaussians[c1][c2].mu_0 = mu_0
                self._gaussians[c1][c2].sigma = self._cov_model.sigma_0
                self._gaussians[c1][c2].resample()

        if self.special_case_self_conns:
            W_self = W[np.arange(self.N), np.arange(self.N)]
            self._self_gaussian.mu_0 = W_self.mean(axis=0)
            self._self_gaussian.sigma_0 = np.diag(W_self.var(axis=0))
            self._self_gaussian.resample()

        # Cluster the neurons based on their rows and columns
        from sklearn.cluster import KMeans
        features = np.hstack((W[:,:,0], W[:,:,0].T))
        km = KMeans(n_clusters=self.C)
        km.fit(features)
        self.c = km.labels_.astype(int)

        print("Initial c: ", self.c)
Code example #27
def sig_lmc(C, A):
    '''
    This a function that using Lumped Markov chain to calculate
    the significance of clusters in a give communinity structure.
    refer to "Piccardi 2011 in PloS one".
    Here we normalize the original definition of persistence by
    the size of the corresponding cluster to get a better comparison
    between clusters of different sizes.
    INPUT:
        "A" is a N-by-N weighted adjacency matrix
        "C" is a N-by-1 partition(cluster) vector
    OUTPUT:
        normalized persistence probability of all clusters
    '''
    '''
    Transition Matrix
    '''
    C = np.asarray(C)
    A = np.double(A)
    P = np.linalg.solve(np.diag(np.sum(A,axis = 1)),A)
    [eigval, evec] = linalg.eigs(P.T, 1)   # leading left eigenvector (stationary distribution)
    evec = evec.real                       # eigs returns complex values; the vector is real here
    if min(evec) < 0:
        evec = -evec
    pi = np.double(evec.T)
    num_node = np.double(np.shape(A)[0])
    cl_label = np.double(np.unique(C))
    num_cl = len(cl_label)
    H = np.zeros((num_node, num_cl),dtype = np.double)
    for i in range(num_cl):
        H[:, i] = np.double((C==cl_label[i]))

    # Transition matrix of the lumped Markov chain

    Q = np.dot(np.dot(np.dot(np.linalg.solve(np.diag(np.dot(pi,H).flatten()),H.T),np.diag(pi.flatten())),P),H)
    persistence = np.multiply(np.divide(np.diag(Q), np.sum(H,axis = 0)),np.sum(H))
    return persistence
Code example #28
File: make_aux_data.py Project: Cysu/noisy_label
def compute_matrix_c(clean_labels, noisy_labels):
    cm = confusion_matrix(clean_labels, noisy_labels)
    cm -= np.diag(np.diag(cm))
    cm = cm * 1.0 / cm.sum(axis=1, keepdims=True)
    cm = cm.T
    L = len(cm)
    alpha = 1.0 / (L - 1)
    C = np.zeros((L, L))
    for j in range(L):
        f = cm[:, j].ravel()
        f = sorted(zip(f, range(L)), reverse=True)
        best_lik = -np.inf
        best_i = -1
        for i in range(L + 1):
            c = np.zeros((L,))
            for k in range(0, i):
                c[k] = f[k][0]
            if c.sum() > 0:
                c /= c.sum()
            lik = 0
            for k in range(0, i):
                lik += f[k][0] * np.log(c[k])
            for k in range(i, L):
                lik += f[k][0] * np.log(alpha)
            if lik >= best_lik:
                best_lik = lik
                best_i = i
            if i < L and f[i][0] == 0:
                break
        for k in range(0, best_i):
            C[f[k][1], j] = f[k][0]
    return C / C.sum(axis=0)
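
An illustrative call with three classes, assuming confusion_matrix is sklearn.metrics.confusion_matrix as the module appears to use:

import numpy as np
from sklearn.metrics import confusion_matrix

clean = [0, 0, 0, 1, 1, 1, 2, 2, 2]
noisy = [0, 1, 0, 1, 1, 2, 2, 2, 0]
C = compute_matrix_c(clean, noisy)
print(C)
print(C.sum(axis=0))   # every column sums to 1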
Code example #29
    def generate_time_of_use_periods(self):
        """
        time of use periods will be described by NxM indicator matricies

        """
        N = const.DAILY_UNITS
        quarters = self.generate_quarter_hours()
        
        peak_indicator = [1 if ( (t >= const.PEAK_TIME_RANGE[0]) & (t < const.PEAK_TIME_RANGE[1])) else 0 for t in quarters]

        part_peak_indicator = [1 if (((t >= const.PART_PEAK_TIME_RANGE[0][0]) and (t < const.PART_PEAK_TIME_RANGE[0][1]))
                                        or ((t >= const.PART_PEAK_TIME_RANGE[1][0]) and (t < const.PART_PEAK_TIME_RANGE[1][1]))) else 0 for t in quarters]

        off_peak_indicator = [1 if (((t >= const.OFF_PEAK_TIME_RANGE[0][0]) and (t < const.OFF_PEAK_TIME_RANGE[0][1]))
                                        or ((t >= const.OFF_PEAK_TIME_RANGE[1][0]) and (t < const.OFF_PEAK_TIME_RANGE[1][1]))) else 0 for t in quarters]

        peak_day = np.diag(peak_indicator)
        part_peak = np.diag(part_peak_indicator)
        off_peak_weekday = np.diag(off_peak_indicator) 

        off_peak_weekend_off = np.zeros([N,N]) # used for peak, part_peak
        off_peak_weekend_on  = np.diag([1]*N) # used for off_peak

        # each of these will block_diag 5 week day indicators and 2 weekend indicators
        self.peak_mat = block_diag(peak_day, peak_day, peak_day, peak_day, peak_day,
                                        off_peak_weekend_off, off_peak_weekend_off)
        self.part_peak_mat = block_diag(part_peak, part_peak, part_peak, part_peak, part_peak,
                                        off_peak_weekend_off,off_peak_weekend_off)
        self.off_peak_mat = block_diag(off_peak_weekday, off_peak_weekday, off_peak_weekday, off_peak_weekday, off_peak_weekday,
                                        off_peak_weekend_on, off_peak_weekend_on)
        self.all_peak_mat = np.eye(self.horizon)
Code example #30
File: multiview.py Project: dakotabenjamin/OpenSfM
def R_from_homography(H, f1, f2):
    K1 = np.diag([f1, f1, 1])
    K2 = np.diag([f2, f2, 1])
    K2inv = np.linalg.inv(K2)
    R = K2inv.dot(H).dot(K1)
    R = project_to_rotation_matrix(R)
    return R
Code example #31
def fmda_advance_region(cycle, cfg, rtma, wksp_path, lookback_length, meso_token):
    """
    Advance the fuel moisture estimates in the region specified by the configuration.
    The function assumes that the fuel moisture model has not been advanced to this
    cycle yet and will overwrite any previous computations.
    
    Control flow:
    
    1) read in RTMA variables
    2) check if there is a stored FM model for previous cycle
    2a) yes -> load it, advance one time-step, perform DA
    2b) no -> compute equilibrium, use background covariance to do DA
    3) store model
    
    :param cycle: the datetime indicating the processed cycle in UTC
    :param cfg: the configuration dictionary specifying the region
    :param rtma: the RTMA object that can be used to retrieve variables for this cycle
    :param wksp_path: the workspace path for the cycler
    :param lookback_length: number of cycles to search before we find a computed cycle
    :param meso_token: the mesowest API access token
    :return: the model advanced and assimilated at the current cycle
    """
    logging.info("rtma_cycler.fmda_advance_region: %s" % str(cycle))
    model = None
    prev_cycle = cycle - timedelta(hours=1)
    prev_model_path = compute_model_path(prev_cycle, cfg.code, wksp_path)
    if not osp.exists(prev_model_path):
        logging.info('CYCLER cannot find model from previous cycle %s' % str(prev_cycle))
        if lookback_length > 0:
            model = fmda_advance_region(cycle - timedelta(hours=1), cfg, rtma, wksp_path, lookback_length - 1, meso_token)
    else:
        logging.info('CYCLER found previous model for cycle %s.' % str(prev_cycle))
        model = FuelMoistureModel.from_netcdf(prev_model_path)
        
    # retrieve the variables and make sure they are available (we should not be here if they are not)
    try:
        dont_have_vars, have_vars = rtma.retrieve_rtma(cycle)
    except ValueError as e:
        logging.error(e)
        sys.exit(1) 
    assert not dont_have_vars
    
    logging.info('CYCLER loading RTMA data for cycle %s.' % str(cycle))
    TD, T2, RH, precipa, hgt, lats, lons = load_rtma_data(have_vars, cfg.bbox)
    Ed, Ew = compute_equilibria(T2, RH)

    rain = precipa[:,:] + 0
    # remove rain that is too small to make any difference 
    rain[rain < 0.01] = 0
    # remove bogus rain that is too large 
    rain[rain > 1e10] = 0

    dom_shape = T2.shape

    # store the lons/lats for this domain
    geo_path = osp.join(wksp_path, '%s-geo.nc' % cfg.code)
    if not osp.isfile(geo_path):
        logging.info('CYCLER initializing new file %s.' % (geo_path))
        d = netCDF4.Dataset(geo_path, 'w', format='NETCDF4')
        d.createDimension('south_north', dom_shape[0])
        d.createDimension('west_east', dom_shape[1])
        xlat = d.createVariable('XLAT', 'f4', ('south_north', 'west_east'))
        xlat[:,:] = lats
        xlong = d.createVariable('XLONG', 'f4', ('south_north', 'west_east'))
        xlong[:,:] = lons
        d.close()
    else:
        logging.info('CYCLER file already exists:  %s.' % (geo_path))
    
    
    # the process noise matrix
    Q = np.diag([1e-4,5e-5,1e-5,1e-6,1e-6])
    
    # background covariance
    P0 = np.diag([0.01,0.01,0.01,0.001,0.001])

    # check if we must start from equilibrium
    if model is None:
        logging.info('CYCLER initializing from equilibrium for cycle %s.' % (str(cycle)))
        # setup model parameters    
        Nk = 3
        Tk = np.array([1.0, 10.0, 100.0])
        m0 = np.expand_dims(0.5 * (Ed + Ew), axis=2)
        model = FuelMoistureModel(m0[:,:,[0,0,0]], Tk, P0)
    else:
        logging.info('CYCLER advancing model one hour to cycle %s.' % (str(cycle)))
        dt = 3600 # always 1 hr step in RTMA
        model.advance_model(Ed, Ew, rain, dt, Q)

    logging.info('CYCLER retrieving fm-10 observations for cycle %s.' % (str(cycle)))
    
    # perform assimilation with mesowest observations
    tm_start = cycle - timedelta(minutes=30)
    tm_end = cycle + timedelta(minutes=30)
    fm10 = retrieve_mesowest_observations(meso_token, tm_start, tm_end, lats, lons)
    fm10v = []
    for fm10_obs in fm10.values():
        for obs in fm10_obs:
            fm10v.append(obs.get_value())
    
    logging.info('CYCLER retrieved %d valid observations, min/mean/max [%g/%g/%g].' %
                 (len(fm10),np.amin(fm10v),np.mean(fm10v),np.amax(fm10v)))
    
    # run the data assimilation step
    covs = [np.ones(dom_shape), hgt / 2000.0]
    covs_names = ['const','hgt/2000']
    if np.any(rain > 0.01):
        covs.append(rain)
        covs_names.append('rain')
    execute_da_step(model, cycle, covs, covs_names, fm10)
    
    # make geogrid files for WPS; datasets and lines to add to GEOGRID.TBL
    geo_path = compute_model_path(cycle, cfg.code, wksp_path,ext="geo")
    index = rtma.geogrid_index()
    print('index',index)
    model.to_geogrid(geo_path,index,lats,lons)

    # store the new model  
    model_path = compute_model_path(cycle, cfg.code, wksp_path)
    logging.info('CYCLER writing model variables to:  %s.' % model_path)
    model.to_netcdf(ensure_dir(model_path),
        {'EQUILd FM':Ed,'EQUILw FM':Ew,'TD':TD,'T2':T2,'RH':RH,'PRECIPA':precipa,'PRECIP':rain,'HGT':hgt})
    
    return model
Code example #32
def findBestLambda(X, z, f, k, lambdas, degree, bestLambda = True):
    lamErrOLS = np.zeros(lambdas.shape[0])
    lamErrRidge = np.zeros(lambdas.shape[0])
    lamErrLasso = np.zeros(lambdas.shape[0])

    numBetas = X.shape[1]
    numLambdas = len(lambdas)
    betasOLS = np.empty((numLambdas, numBetas))
    betasRidge = np.empty((numLambdas, numBetas))
    betasLasso = np.empty((numLambdas, numBetas))

    betasSigmaOLS = np.empty((numLambdas, numBetas))
    betasSigmaRidge = np.empty((numLambdas, numBetas))
    betasSigmaLasso = np.empty((numLambdas, numBetas))

    ##################################
    # Start of K-fold algorithm      #
    ##################################

    # Generally k = 5 is a good choice
    #k = 50
    kf = k_fold(n_splits=k, shuffle=True)
    kf.get_n_splits(X)

    for nlam, _lambda in enumerate(lambdas):
        ######### KFold! #############

        errorsOLS = np.empty(k)
        zPredictsOLS = np.empty((int(z.shape[0]/k)))
        betasOLSTemp = np.empty((k, numBetas))
        betasSigmaOLSTemp = np.empty((k, numBetas))

        errorsRidge = np.empty(k)
        zPredictsRidge = np.empty((int(z.shape[0]/k)))
        betasRidgeTemp = np.empty((k, numBetas))
        betasSigmaRidgeTemp = np.empty((k, numBetas))

        errorsLasso = np.empty(k)
        zPredictsLasso = np.empty((int(z.shape[0]/k)))
        betasLassoTemp = np.empty((k, numBetas))
        betasSigmaLassoTemp = np.empty((k, numBetas))

        #zTests = np.empty((int(z.shape[0]/k)))
        i = 0
        X_rest, z_rest, f_rest = X, z, f
        for train_index, test_index in kf.split():
            X_train, X_validation = X_rest[train_index], X_rest[test_index]
            z_train, z_validation = z_rest[train_index], z_rest[test_index]
            f_train, f_validation = f_rest[train_index], f_rest[test_index]

            # OLS, Finding the best lambda
            betaOLS = linFit(X_train, z_train, model='OLS', _lambda = _lambda)
            betasOLSTemp[i] = betaOLS.reshape(-1)
            zPredictsOLS = (X_validation @ betaOLS)
            errorsOLS[i] = np.mean((z_validation - zPredictsOLS)**2)
            sigmaOLSSq = 1/(X_validation.shape[0] - 0*X_validation.shape[1]) * np.sum((z_validation - zPredictsOLS)**2)
            sigmaBetaOLSSq = sigmaOLSSq * np.diag(np.linalg.pinv(X_validation.T @ X_validation))
            betasSigmaOLSTemp[i] = np.sqrt(sigmaBetaOLSSq)


            # Ridge, Finding the best lambda
            betaRidge = linFit(X_train, z_train, model='Ridge', _lambda = _lambda)
            betasRidgeTemp[i] = betaRidge.reshape(-1)
            zPredictsRidge = (X_validation @ betaRidge)
            errorsRidge[i] = np.mean((z_validation - zPredictsRidge)**2)
            sigmaRidgeSq = 1/(X_validation.shape[0] - 0*X_validation.shape[1]) * np.sum((z_validation - zPredictsRidge)**2)
            XInvRidge = np.linalg.pinv(X_validation.T @ X_validation + _lambda * np.eye(len(betaRidge)))
            sigmaBetaRidgeSq = sigmaRidgeSq * np.diag(XInvRidge @ X_validation.T @ X_validation @ XInvRidge.T)
            betasSigmaRidgeTemp[i] = np.sqrt(sigmaBetaRidgeSq)

            # Lasso, Finding the best lambda

            lasso = skl.Lasso(alpha = _lambda, fit_intercept=True, max_iter=10**9, precompute=True).fit(X_train, z_train)
            betaLasso = lasso.coef_
            betasLassoTemp[i] = betaLasso.reshape(-1)
            zPredictsLasso = lasso.predict(X_validation)
            errorsLasso[i] = mean_squared_error(z_validation, zPredictsLasso)

            i += 1
            #print(i, nlam)

        betasOLS[nlam] = np.mean(betasOLSTemp,axis=0)
        betasRidge[nlam] = np.mean(betasRidgeTemp,axis=0)
        betasLasso[nlam] = np.mean(betasLassoTemp,axis=0)
        betasSigmaOLS[nlam] = np.mean(betasSigmaOLSTemp, axis=0)
        betasSigmaRidge[nlam] = np.mean(betasSigmaRidgeTemp, axis = 0)
        lamErrOLS[nlam] = min(errorsOLS)
        lamErrRidge[nlam] = min(errorsRidge)
        lamErrLasso[nlam] = min(errorsLasso)
    if bestLambda == False:
        # in this case, return the betas and errors
        return betasOLS, betasRidge, betasLasso, betasSigmaOLS, betasSigmaRidge, lamErrOLS, lamErrRidge, lamErrLasso
    else:
        # In this case just return the best value for Ridge and Lasso in that order
        minimumRidge = np.min(np.array([lambdas[lamErrRidge == min(lamErrRidge)]]))
        minimumLasso = np.min(np.array([lambdas[lamErrLasso == min(lamErrLasso)]]))
        return minimumRidge, minimumLasso
Code example #33
File: face-recog.py Project: wenyi616/face-recog
# 1.e | perform SVD and compute eigenface (VT)
U, s, VT = np.linalg.svd(new_train_data)
VT = np.array(VT, dtype=float)

plt.figure(figsize=(10, 5))
for i in range(10):
    ax = plt.subplot(2, 5, i+1)
    ax.imshow(VT[i].reshape(50,50), cmap = cm.Greys_r)
    
    
# 1.f | r-rank approx error
errors = []
for r in range(1,201):
    # U[:,: r]  Σ[: r,: r]  VT [: r,:]
    rank_r = np.dot(np.dot(U[:,:r],np.diag(s[:r])),VT[:r,:])
    errors.append(np.linalg.norm(new_train_data - rank_r))

rs = range(1, 201)

plt.figure(figsize=(8, 5))
plt.plot(rs, errors)

plt.xlabel("r")
plt.ylabel("rank-r approximation error")
plt.show()

# 1.g | generate eigenface feature matrix
def eigenface_feature(matrix, r):
    # to get F, multiply X by the transpose of the first r rows of VT   (VT[:r, :])
    F = np.dot(matrix, VT[:r, :].T)
    return F
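
For example, a 10-dimensional eigenface feature matrix for the training set would then be obtained as follows (illustrative usage, not part of the original script):

F_train = eigenface_feature(new_train_data, 10)
print(F_train.shape)   # (number of training images, 10)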
Code example #34
logging = True

########################################################################
#For position estimate
del_t = 0.01  #sec
g = 0  #m/s^2
x_old = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=float)  #m, m/s, rad
omega = np.array([0, 0, 0], dtype=float)  #rad/sec
acc = np.array([0, 0, -g], dtype=float)  #m/s^2
gamma = 800  # 0~1000
P_old = np.identity(9) * gamma
I_9 = np.identity(9)
acc_noise = 0.001
gyro_noise = 0.0003468268
QQ = np.diag([
    0, 0, 0, acc_noise, acc_noise, acc_noise, gyro_noise, gyro_noise,
    gyro_noise
])
RR = np.diag([0.4, 0.4, 0.4, 0.02, 0.02, 0.02, 0.02, 0.01])

alfa = np.array([0.8244, 0.8244, 0.8244], dtype=float)
m9a_low_old = np.array([0, 0, 0], dtype=float)
m9g_low_old = np.array([0, 0, 0], dtype=float)

anchor1 = np.array([-2.0, 2.306, 2.227], dtype=float)
anchor2 = np.array([2.0, 2.306, 2.227], dtype=float)
anchor3 = np.array([2.0, -2.306, 2.227], dtype=float)
anchor4 = np.array([-2.0, -2.306, 2.227], dtype=float)

#global dd1
#global dd2
#global dd3
Code example #35
File: _scaling.py Project: tskarvone/probnum
    def __init__(
        self,
        factors: Union[np.ndarray, ScalarArgType],
        shape: Optional[ShapeArgType] = None,
        dtype: Optional[DTypeArgType] = None,
    ):
        self._factors = None
        self._scalar = None

        if np.ndim(factors) == 0:
            # Isotropic scaling
            self._scalar = probnum.utils.as_numpy_scalar(factors, dtype=dtype)

            if shape is None:
                raise ValueError(
                    "When specifying the scaling factors by a scalar, a shape must be "
                    "specified."
                )

            shape = probnum.utils.as_shape(shape)

            if len(shape) == 1:
                shape = 2 * shape
            elif len(shape) != 2:
                raise ValueError(
                    "The shape of a linear operator must be two-dimensional."
                )

            if shape[0] != shape[1]:
                raise np.linalg.LinAlgError("Scaling operators must be square.")

            dtype = self._scalar.dtype

            if self._scalar == 1:
                # Identity
                matmul = lambda x: x.astype(
                    np.result_type(self.dtype, x.dtype), copy=False
                )
                rmatmul = lambda x: x.astype(
                    np.result_type(self.dtype, x.dtype), copy=False
                )

                apply = lambda x, axis: x.astype(
                    np.result_type(self.dtype, x.dtype), copy=False
                )

                todense = lambda: np.identity(shape[0], dtype=dtype)

                conjugate = lambda: self
                inverse = lambda: self

                rank = lambda: np.intp(shape[0])
                cond = self._cond_isotropic
                eigvals = lambda: np.ones(shape[0], dtype=self._inexact_dtype)
                det = lambda: self._scalar.astype(self._inexact_dtype, copy=False)
                logabsdet = lambda: (0 * self._scalar).astype(
                    self._inexact_dtype, copy=False
                )
            else:
                matmul = lambda x: self._scalar * x
                rmatmul = lambda x: self._scalar * x

                apply = lambda x, axis: self._scalar * x

                todense = self._todense_isotropic

                conjugate = lambda: (
                    self
                    if np.imag(self._scalar) == 0
                    else Scaling(np.conj(self._scalar), shape=shape)
                )
                inverse = self._inverse_isotropic

                rank = lambda: np.intp(0 if self._scalar == 0 else shape[0])
                cond = self._cond_isotropic
                eigvals = lambda: np.full(
                    shape[0], self._scalar, dtype=self._inexact_dtype
                )
                det = lambda: (
                    self._scalar.astype(self._inexact_dtype, copy=False) ** shape[0]
                )
                logabsdet = lambda: (
                    probnum.utils.as_numpy_scalar(-np.inf, dtype=self._inexact_dtype)
                    if self._scalar == 0
                    else shape[0] * np.log(np.abs(self._scalar))
                )

            trace = lambda: self.shape[0] * self._scalar
        elif np.ndim(factors) == 1:
            # Anisotropic scaling
            self._factors = np.asarray(factors, dtype=dtype)
            self._factors.setflags(write=False)

            shape = 2 * self._factors.shape
            dtype = self._factors.dtype

            matmul = lambda x: self._factors[:, np.newaxis] * x
            rmatmul = lambda x: self._factors * x

            apply = lambda x, axis: (
                self._factors.reshape((-1,) + (x.ndim - (axis + 1)) * (1,)) * x
            )

            todense = lambda: np.diag(self._factors)

            conjugate = lambda: (
                self
                if (
                    not np.issubdtype(dtype, np.complexfloating)
                    or np.all(np.imag(self._factors) == 0)
                )
                else Scaling(np.conj(self._factors))
            )
            inverse = self._inverse_anisotropic

            rank = lambda: np.count_nonzero(self.factors, axis=0)
            eigvals = lambda: self._factors
            cond = self._cond_anisotropic
            det = lambda: np.prod(self._factors).astype(self._inexact_dtype, copy=False)
            logabsdet = None
            trace = lambda: np.sum(self._factors)
        else:
            raise TypeError(
                "`factors` must either be a scalar or a 1-dimensional array-like"
            )

        super().__init__(
            shape,
            dtype,
            matmul=matmul,
            rmatmul=rmatmul,
            apply=apply,
            todense=todense,
            conjugate=conjugate,
            transpose=lambda: self,
            adjoint=conjugate,
            inverse=inverse,
            rank=rank,
            eigvals=eigvals,
            cond=cond,
            det=det,
            logabsdet=logabsdet,
            trace=trace,
        )
Code example #36
    def termite_plot(
        self,
        doc_term_matrix,
        id2term,
        *,
        topics=-1,
        sort_topics_by="index",
        highlight_topics=None,
        n_terms=25,
        rank_terms_by="topic_weight",
        sort_terms_by="seriation",
        save=False,
        rc_params=None,
    ):
        """
        Make a "termite" plot for assessing topic models using a tabular layout
        to promote comparison of terms both within and across topics.

        Args:
            doc_term_matrix (:class:`numpy.ndarray` or sparse matrix): corpus
                represented as a document-term matrix with shape (n_docs, n_terms);
                may have tf- or tfidf-weighting
            id2term (List[str] or dict): object that returns the term string corresponding
                to term id ``i`` through ``id2term[i]``; could be a list of strings
                where the index represents the term id, such as that returned by
                ``sklearn.feature_extraction.text.CountVectorizer.get_feature_names()``,
                or a mapping of term id: term string
            topics (int or Sequence[int]): topic(s) to include in termite plot;
                if -1, all topics are included
            sort_topics_by ({'index', 'weight'}):
            highlight_topics (int or Sequence[int]): indices for up to 6 topics
                to visually highlight in the plot with contrasting colors
            n_terms (int): number of top terms to include in termite plot
            rank_terms_by ({'topic_weight', 'corpus_weight'}): value used
                to rank terms; the top-ranked ``n_terms`` are included in the plot
            sort_terms_by ({'seriation', 'weight', 'index', 'alphabetical'}):
                method used to vertically sort the selected top ``n_terms`` terms;
                the default ("seriation") groups similar terms together, which
                facilitates cross-topic assessment
            save (str): give the full /path/to/fname on disk to save figure
            rc_params (dict, optional): allow passing parameters to rc_context in matplotlib.pyplot;
                details in https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.rc_context.html

        Returns:
            ``matplotlib.axes.Axes.axis``: Axis on which termite plot is plotted.

        Raises:
            ValueError: if more than 6 topics are selected for highlighting, or
                an invalid value is passed for the sort_topics_by, rank_terms_by,
                and/or sort_terms_by params

        References:
            - Chuang, Jason, Christopher D. Manning, and Jeffrey Heer. "Termite:
              Visualization techniques for assessing textual topic models."
              Proceedings of the International Working Conference on Advanced
              Visual Interfaces. ACM, 2012.
            - for sorting by "seriation", see https://arxiv.org/abs/1406.5370

        See Also:
            :func:`viz.termite_plot <textacy.viz.termite.termite_plot>`

        TODO: `rank_terms_by` other metrics, e.g. topic salience or relevance
        """
        if highlight_topics is not None:
            if isinstance(highlight_topics, int):
                highlight_topics = (highlight_topics, )
            elif len(highlight_topics) > 6:
                raise ValueError(
                    "no more than 6 topics may be highlighted at once")

        # get topics indices
        if topics == -1:
            topic_inds = tuple(range(self.n_topics))
        elif isinstance(topics, int):
            topic_inds = (topics, )
        else:
            topic_inds = tuple(topics)

        # get topic indices in sorted order
        if sort_topics_by == "index":
            topic_inds = sorted(topic_inds)
        elif sort_topics_by == "weight":
            topic_inds = tuple(topic_ind for topic_ind in np.argsort(
                self.topic_weights(self.transform(doc_term_matrix)))[::-1]
                               if topic_ind in topic_inds)
        else:
            raise ValueError(
                errors.value_invalid_msg(
                    "sort_topics_by",
                    sort_topics_by,
                    {"index", "weight"},
                ))

        # get column index of any topics to highlight in termite plot
        if highlight_topics is not None:
            highlight_cols = tuple(i for i in range(len(topic_inds))
                                   if topic_inds[i] in highlight_topics)
        else:
            highlight_cols = None

        # get top term indices
        if rank_terms_by == "corpus_weight":
            term_inds = np.argsort(np.ravel(
                doc_term_matrix.sum(axis=0)))[:-n_terms - 1:-1]
        elif rank_terms_by == "topic_weight":
            term_inds = np.argsort(
                self.model.components_.sum(axis=0))[:-n_terms - 1:-1]
        else:
            raise ValueError(
                errors.value_invalid_msg(
                    "rank_terms_by",
                    rank_terms_by,
                    {"corpus_weight", "topic_weight"},
                ))

        # get top term indices in sorted order
        if sort_terms_by == "weight":
            pass
        elif sort_terms_by == "index":
            term_inds = sorted(term_inds)
        elif sort_terms_by == "alphabetical":
            term_inds = sorted(term_inds, key=lambda x: id2term[x])
        elif sort_terms_by == "seriation":
            topic_term_weights_mat = np.array(
                np.array([
                    self.model.components_[topic_ind][term_inds]
                    for topic_ind in topic_inds
                ])).T
            # calculate similarity matrix
            topic_term_weights_sim = np.dot(topic_term_weights_mat,
                                            topic_term_weights_mat.T)
            # subtract minimum of sim mat in order to keep sim mat nonnegative
            topic_term_weights_sim = (topic_term_weights_sim -
                                      topic_term_weights_sim.min())
            # compute the graph Laplacian and its eigendecomposition
            L = np.diag(topic_term_weights_sim.sum(axis=1)) - topic_term_weights_sim
            D, V = np.linalg.eigh(L)
            sort_order = np.argsort(D)
            D = D[sort_order]
            V = V[:, sort_order]
            # the Fiedler vector is the eigenvector of the 2nd-smallest eigenvalue
            fiedler = V[:, 1]
            # get permutation corresponding to sorting the Fiedler vector
            term_inds = [term_inds[i] for i in np.argsort(fiedler)]
        else:
            raise ValueError(
                errors.value_invalid_msg(
                    "sort_terms_by",
                    sort_terms_by,
                    {"weight", "index", "alphabetical", "seriation"},
                ))

        # get topic and term labels
        topic_labels = tuple("topic {}".format(topic_ind)
                             for topic_ind in topic_inds)
        term_labels = tuple(id2term[term_ind] for term_ind in term_inds)

        # get topic-term weights to size dots
        term_topic_weights = np.array([
            self.model.components_[topic_ind][term_inds]
            for topic_ind in topic_inds
        ]).T

        return viz.draw_termite_plot(
            term_topic_weights,
            topic_labels,
            term_labels,
            highlight_cols=highlight_cols,
            save=save,
            rc_params=rc_params,
        )
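The seriation branch above orders terms by the Fiedler vector of a term-similarity graph. Below is a minimal self-contained sketch of that idea using a hypothetical toy weight matrix (plain NumPy, not the textacy model itself):

import numpy as np

# hypothetical topic-term weights: rows = terms, columns = topics
weights = np.array([[0.9, 0.1],
                    [0.8, 0.2],
                    [0.1, 0.9],
                    [0.2, 0.7]])

# pairwise term similarity, shifted to stay nonnegative
sim = weights @ weights.T
sim = sim - sim.min()

# graph Laplacian and its second-smallest eigenvector (the Fiedler vector)
laplacian = np.diag(sim.sum(axis=1)) - sim
eigvals, eigvecs = np.linalg.eigh(laplacian)
fiedler = eigvecs[:, 1]

# sorting by the Fiedler vector places terms with similar topic profiles next to each other
print(np.argsort(fiedler))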
コード例 #37
0
ファイル: big_svd.py プロジェクト: AntonioCarta/mslmn
    for c in reversed(range(C)):
        if s[c, c] > min_sigma:
            break
    # TODO: this can change the dimension.
    # either fill with zeros or output some warning.
    v[:, c:] = 0
    s[c:, c:] = 0
    u_t[c:, :] = 0
    return v, s, u_t


if __name__ == "__main__":
    n = 100
    p = 100
    m = np.random.rand(n, n)
    (v, s, u_t) = SvdForBigData(m, p + 1, p + 1)
    # print(v)
    vv, _, _ = np.linalg.svd(m)
    # print(v - vv[:, :p])

    mv = ortho_group.rvs(dim=n)
    mu = ortho_group.rvs(dim=n)
    s = np.random.randn(n)
    s[p:] = 0
    s = np.diag(s)
    A = mv @ s @ mu

    a, b, c = SvdForBigData(A, p + 1, p + 1)
    c = c[:, :p]
    assert la.norm(A[:, :p] - (a @ b @ c)) / la.norm(A[:, :p]) < 0.01
コード例 #38
0
 def symmetrize(self, a):
     return a + a.T - np.diag(a.diagonal())
コード例 #39
0
ファイル: balldrop_test.py プロジェクト: gehly/metis
def test_odeint():

    intfcn = ifunc.int_twobody

    X = np.reshape([7000., 0., 500., 0., 7.5, 0.], (6, 1))
    P = np.diag([1., 1., 1., 1e-6, 1e-6, 1e-6])

    inputs = {}
    inputs['GM'] = 3.986e5

    int_tol = 1e-8

    tin = [0., 10.]

    # Two Body
    int0 = X.flatten()
    intout = odeint(intfcn,
                    int0,
                    tin,
                    args=(inputs, ),
                    rtol=int_tol,
                    atol=int_tol)

    print('Normal Exec')
    print(intout)
    x1 = np.reshape(intout[-1, :], (6, 1))
    print(x1)

    print()
    print()

    # UKF Style
    L = 6
    pnorm = 2.
    alpha = 1.

    # Prior information about the distribution
    kurt = gamma(5. / pnorm) * gamma(1. / pnorm) / (gamma(3. / pnorm)**2.)
    beta = kurt - 1.
    kappa = kurt - float(L)

    # Compute sigma point weights
    lam = alpha**2. * (L + kappa) - L
    gam = np.sqrt(L + lam)

    sqP = np.linalg.cholesky(P)
    Xrep = np.tile(X, (1, L))
    chi = np.concatenate((X, Xrep + (gam * sqP), Xrep - (gam * sqP)), axis=1)
    chi_v = np.reshape(chi, (L * (2 * L + 1), 1), order='F')

    intfcn = ifunc.int_twobody_ukf
    int0 = chi_v.flatten()

    intout = odeint(intfcn,
                    int0,
                    tin,
                    args=(inputs, ),
                    rtol=int_tol,
                    atol=int_tol)

    print('UKF Exec')
    print(intout)
    x2 = np.reshape(intout[-1, 0:6], (6, 1))
    print(x2)

    test = x2 - x1
    print(test)

    return
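The comments above spell out the sigma-point construction (lam = alpha**2 * (L + kappa) - L, gam = sqrt(L + lam), chi = [X, X + gam*sqP, X - gam*sqP]). A small standalone sketch of just that step, with a hypothetical two-dimensional state and covariance:

import numpy as np

# hypothetical 2-state mean and covariance
X = np.array([[1.0], [0.5]])
P = np.diag([0.04, 0.01])
L = X.shape[0]

alpha, kappa = 1.0, 0.0
lam = alpha**2 * (L + kappa) - L
gam = np.sqrt(L + lam)

sqP = np.linalg.cholesky(P)
Xrep = np.tile(X, (1, L))
chi = np.concatenate((X, Xrep + gam * sqP, Xrep - gam * sqP), axis=1)
print(chi.shape)  # (L, 2*L + 1) sigma points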
コード例 #40
0
    def fit(self, TS, threshold_type='range', **kwargs):
        """
        Given a (N,L) time series, infer inter-node coupling weights using a
        Thouless-Anderson-Palmer mean field approximation.
        After [this tutorial]
        (https://github.com/nihcompmed/network-inference/blob/master/sphinx/codesource/inference.py)
        in python.

        From the paper: "Similar to naive mean field, TAP works well only in
        the regime of large sample sizes and small coupling variability.
        However, this method leads to poor inference results in the regime
        of small sample sizes and/or large coupling variability."

        Params
        ------
        TS (np.ndarray): Array consisting of $L$ observations from $N$ sensors.
        threshold_type (str): Which thresholding function to use on the matrix of
        weights. See `netrd.utilities.threshold.py` for documentation. Pass additional
        arguments to the thresholder using `**kwargs`.

        Returns
        -------
        G (nx.Graph or nx.DiGraph): a reconstructed graph.

        """

        N, L = np.shape(TS)             # N nodes, length L
        m = np.mean(TS, axis=1)         # empirical value

        # A matrix
        A = 1 - m**2
        A_inv = np.diag(1 / A)
        A = np.diag(A)
        ds = TS.T - m                   # equal time correlation
        C = np.cov(ds, rowvar=False, bias=True)
        C_inv = linalg.inv(C)
        
        s1 = TS[:,1:]                   # one-step-delayed correlation

        ds1 = s1.T - np.mean(s1, axis=1)
        D = cross_cov(ds1, ds[:-1])

        # predict naive mean field W:
        B = np.dot(D, C_inv)
        W_NMF = np.dot(A_inv, B)

        # TAP part: solving for F_i in the following equation
        # F * (1 - F)**2 = (1 - m_i**2) * sum_j W_NMF_ij**2 * (1 - m_j**2)  ==>  0 < F < 1

        step = 0.001
        nloop = int(0.33 / step) + 2

        W2_NMF = W_NMF**2

        temp = np.empty(N)
        F = np.empty(N)

        for i in range(N):
            temp[i] = (1 - m[i]**2) * np.sum(W2_NMF[i, :] * (1 - m[:]**2))

            y = -1.
            iloop = 0

            while y < 0 and iloop < nloop:
                x = iloop * step
                y = x * (1 - x)**2 - temp[i]
                iloop += 1

            F[i] = x

        # A_TAP matrix
        A_TAP = np.empty(N)
        for i in range(N):
            A_TAP[i] = A[i, i] * (1 - F[i])

        A_TAP_inv = np.diag(1 / A_TAP)

        # predict W:
        W = np.dot(A_TAP_inv, B)
        self.results['matrix'] = W

        # threshold the network
        W_thresh = threshold(W, threshold_type, **kwargs)
        self.results['thresholded_matrix'] = W_thresh

        # construct the network
        self.results['graph'] = create_graph(W_thresh)
        G = self.results['graph']


        return G
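The docstring and the first half of fit() describe the naive mean-field estimate W_NMF = A^{-1} D C^{-1} that the TAP step then refines. A rough self-contained sketch of that naive step on synthetic data (an illustration only, not the estimator's full TAP pipeline; the cross_cov helper is replaced by an explicit one-step-delayed covariance):

import numpy as np
from scipy import linalg

# synthetic +/-1 time series from N sensors over T steps
rng = np.random.default_rng(0)
N, T = 5, 2000
TS = np.sign(rng.standard_normal((N, T)))

m = np.mean(TS, axis=1)                 # empirical means
A_inv = np.diag(1.0 / (1.0 - m**2))

ds = TS.T - m                           # (T, N) fluctuations
C = np.cov(ds, rowvar=False, bias=True)
C_inv = linalg.inv(C)

# one-step-delayed cross-covariance <ds(t+1) ds(t)>
D = ds[1:].T @ ds[:-1] / (T - 1)

# naive mean-field estimate of the coupling matrix
W_NMF = A_inv @ D @ C_inv
print(W_NMF.shape)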
コード例 #41
0
ファイル: nn.py プロジェクト: vrash/ml-class
def err(X,y,TH):
    m = X.shape[0]
    htx = ff(X,TH)[-1]
    ynn = htx.argmax(axis=1) +1 # if 10th el is max, then '0'
    cfmtx = confusion_matrix(y.T,ynn.T)
    return 1 -float(sum(np.diag(cfmtx)))/float(m)
コード例 #42
0
ファイル: balldrop_test.py プロジェクト: gehly/metis
def balldrop_setup():

    # Define inputs
    acc = 1.  #m/s^2
    inputs = {}
    inputs['acc'] = acc
    inputs['Q'] = np.diag([1e-12, 1e-12])
    int_tol = 1e-12

    # Time vector
    ti_list = np.arange(0., 100.1, 1.)

    # Inital State
    X_true = np.array([[0.], [0.]])
    P = np.array([[4., 0.], [0., 1.]])
    pert_vect = np.multiply(np.sqrt(np.diag(P)), np.random.randn(2))
    X_init = X_true + np.reshape(pert_vect, (2, 1))

    state_dict = {}
    state_dict[ti_list[0]] = {}
    state_dict[ti_list[0]]['X'] = X_init
    state_dict[ti_list[0]]['P'] = P

    # Outlier indices
    percent_outliers = 30.
    n_outliers = int(round(0.01 * percent_outliers * len(ti_list)))
    outlier_inds = []
    for ii in range(n_outliers):
        loop_pass = False
        while not loop_pass:
            ind = int(floor(np.random.rand() * len(ti_list)))
            if ind not in outlier_inds:
                outlier_inds.append(ind)
                loop_pass = True

    print('outlier inds', outlier_inds)

    # Generate truth and measurements
    truth_dict = {}
    meas_dict = {}
    sig_y = 0.01
    sig_dy = 0.001
    meas_dict['sigma_dict'] = {}
    meas_dict['sigma_dict']['y'] = sig_y
    meas_dict['sigma_dict']['dy'] = sig_dy
    meas_dict['meas_types'] = ['y', 'dy']
    meas_dict['meas'] = {}
    X = copy.copy(X_true)
    for ii in range(len(ti_list)):

        Xo = copy.copy(X)
        int0 = Xo.flatten()

        if ii > 0:
            tin = [ti_list[ii - 1], ti_list[ii]]
            intout = odeint(int_balldrop,
                            int0,
                            tin,
                            args=(inputs, ),
                            rtol=int_tol,
                            atol=int_tol)

            X = np.reshape(intout[-1, :], (2, 1))

        truth_dict[ti_list[ii]] = X

        if ii in outlier_inds:
            y_noise = 100. * sig_y * np.random.randn()
        else:
            y_noise = sig_y * np.random.randn()

        dy_noise = sig_dy * np.random.randn()

        y_meas = float(X[0] + y_noise)
        dy_meas = float(X[1] + dy_noise)
        meas_dict['meas'][ti_list[ii]] = np.array([[y_meas], [dy_meas]])

    #print state_dict
    #print truth_dict
    #print meas_dict

    # Save Data
    fdir = r'C:\Users\Steve\Documents\research\lp_norm\lp_ukf\test\balldrop'

    #    fname = 'balldrop_inputs_and_truth.pkl'
    #    inputs_file = os.path.join(fdir, fname)
    #    pklFile = open(inputs_file, 'wb')
    #    pickle.dump([state_dict, inputs, truth_dict], pklFile, -1)
    #    pklFile.close()

    fname = 'balldrop_meas_' + str(int(n_outliers)).zfill(2) + 'p.pkl'
    meas_file = os.path.join(fdir, fname)
    pklFile = open(meas_file, 'wb')
    pickle.dump([meas_dict], pklFile, -1)
    pklFile.close()

    return
コード例 #43
0
ファイル: hamiltonian.py プロジェクト: salilab/hmc
def is_approx_diagonal_matrix(M):
    M = np.asarray(M)
    return np.all(np.isclose(M, np.diag(np.diag(M))))
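A quick sanity check of the helper above; the matrices are arbitrary illustrative values:

import numpy as np

print(is_approx_diagonal_matrix(np.diag([1.0, 2.0, 3.0])))       # True
print(is_approx_diagonal_matrix([[1.0, 1e-12], [0.0, 2.0]]))     # True: off-diagonals are ~0
print(is_approx_diagonal_matrix([[1.0, 0.5], [0.5, 2.0]]))       # False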
コード例 #44
0
x_true = lambda t : C1 * np.sin(t) + C2 * np.cos(t) - (1/3) * np.cos(4*t)

# Boundaries
x0 = 1
T = 6
xT = 0.5

# a)

dt = 0.1
t = np.arange(0, T + dt, dt)
n = t.size

v = -2 * np.ones(n - 2)
u = np.ones(n - 3)
A = (1 / dt ** 2) * (np.diag(v) + np.diag(u, 1) + np.diag(u, -1))
A += np.eye(n - 2)

b = 5 * np.cos(4 * t[1:-1])
b[0] = b[0] - x0 / dt ** 2
b[-1] = b[-1] - xT / dt ** 2
b = b.reshape((-1, 1))

A9 = A.copy()
A10 = b.copy()

# b)

x_int = scipy.linalg.solve(A, b)

x = np.zeros(n)
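The excerpt cuts off right after allocating x; a plausible continuation (an assumption, since the original ends here) would combine the interior solution with the boundary values:

# stitch boundary values and interior solution together
x[0] = x0
x[-1] = xT
x[1:-1] = x_int.flatten()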
コード例 #45
0
def ini_param(train_x, nmix, ini_mode):
    
    #Get the number of audio files and ndims
    nfiles = len(train_x)
    ndim = train_x[0].shape[1]
    
    #Initialize weight by random number
    w = np.random.rand(nmix) #w: vector(nmix)
    w = w / np.sum(w)
    
    #Initialize mean by global average
    total_frames = 0
    gm = np.zeros(ndim)
    for d in range(nfiles):
        nframes = train_x[d].shape[0]
        total_frames = total_frames + nframes
        gm = gm + np.sum(train_x[d], axis=0)
    gm = gm / total_frames #gm: vector(ndim)
    m = gm[np.newaxis, :]
    m = np.tile(m, (nmix, 1)) #m: matrix(nmix x ndim)
    
    #Initialize covariance by global average
    cov = np.zeros((nmix, ndim, ndim))
    for d in range(nfiles):
        nframes = train_x[d].shape[0]
        gm2 = gm[np.newaxis, :]
        gm2 = np.tile(gm2, (nframes, 1)) #gm2: matrix(nframes x ndim)
        Ones = np.ones((nmix, nframes)) #Use "Ones" to get uniform summation of (x-m)^2
        cov = cov + sum_outer_product(Ones, train_x[d] - gm2) #cov: tensor(nmix x ndim x ndim)
    cov = cov / total_frames
    
    #In case of "kmeans" initialization
    if ini_mode[0:6] == "kmeans":
        #Average with time-frames
        x = np.zeros((nfiles, ndim))
        for d in range(nfiles):
            x[d, :] = np.average(train_x[d], axis=0)
        
        #Initialize mean by k-means++ or normal k-means
        if ini_mode == "kmeans++":
            clf = KMeans(n_clusters=nmix, init='k-means++', n_jobs=4)
        else:
            clf = KMeans(n_clusters=nmix, init='random', n_jobs=4)
        m = clf.fit(x).cluster_centers_
    
    #In case of "random" initialization
    elif ini_mode == "random":
        #Calculate the sigma
        sigma = np.sqrt(np.diag(cov[0])) #sigma: vector(ndim)
        sigma = sigma[np.newaxis, :]
        sigma = np.tile(sigma, (nmix, 1)) #sigma: matrix(nmix x ndim)
        
        #Initialize mean by random number
        rand_mat = 2 * np.random.rand(nmix, ndim) - 1 #random value between -1 and +1
        m = m + 0.5 * rand_mat * sigma
    
    #Others
    else:
        print("UBM_ini should be 'kmeans++' or 'kmeans' or 'random'.")
        sys.exit()
    
    return w, m, cov
コード例 #46
0
import numpy as np
import matplotlib.pyplot as plt

from src.pdf import fitKDE
from src.risk import getRndCov, cov2corr

# plt.rcParams.update({
#     "text.usetex": True,
#     "font.family": "sans-serif",
#     "font.sans-serif": ["Helvetica"]})

alpha, nCols, nFact, q = .995, 1000, 100, 10
cov = np.cov(np.random.normal(size=(nCols * q, nCols)), rowvar=0)
cov = alpha * cov + (1 - alpha) * getRndCov(nCols, nFact)  # noise+signal
corr0 = cov2corr(cov)
eVal0, eVec0 = getPCA(corr0)

# minimize the squared diff between the ML-distribution and the kde, optim param is sigma**2
eMax0, var0 = findMaxEval(np.diag(eVal0), q, .01)
nFacts0 = eVal0.shape[0] - np.diag(eVal0)[::-1].searchsorted(eMax0)

print(nFacts0)
pdf0 = mpPDF(var0, q=q, pts=nCols)
# pdf1 = fitKDE(np.diag(eVal0), bWidth=.01)  # empirical pdf


plt.figure()
plt.plot(list(pdf0.index), pdf0, 'b')
# plt.plot(list(pdf1.index), pdf1, 'r')
plt.hist(np.diag(eVal0), density=True, bins=100)
# plt.legend(['Marcenko-Pastur', 'Empirical:KDE', 'Empirical'])
plt.legend(['Marcenko-Pastur', 'Empirical'])
plt.ylabel(r'prob[$\lambda$]')
plt.xlabel(r'$\lambda$')
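The calls to getPCA, findMaxEval and mpPDF come from the project's own modules and are not shown in this excerpt. As a point of reference, the Marcenko-Pastur density that mpPDF presumably evaluates can be sketched as follows (a standalone assumption, not the project's implementation):

import numpy as np

def marcenko_pastur_pdf(var, q, pts=1000):
    """Marcenko-Pastur density for variance `var` and ratio q = T/N."""
    e_min = var * (1 - (1.0 / q) ** 0.5) ** 2
    e_max = var * (1 + (1.0 / q) ** 0.5) ** 2
    e_val = np.linspace(e_min, e_max, pts)
    pdf = q / (2 * np.pi * var * e_val) * np.sqrt((e_max - e_val) * (e_val - e_min))
    return e_val, pdf

lam, dens = marcenko_pastur_pdf(var=1.0, q=10, pts=1000)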
コード例 #47
0
def pls(X,Y,var_norm=1,Nbs=500,Nperm=500,k=1,pval=1.00,pMatch=1):
    
    nsub = np.size(X,axis = 0)
    
    
    #--------------initial svd---------------
    Ux, Dx, Vx = np.linalg.svd(X,full_matrices=False)
    Uy, Dy, Vy = np.linalg.svd(Y,full_matrices=False)
    Umix, Dmix, Vmix = np.linalg.svd(np.diag(Dx.T) @ Ux.T @ Uy @ np.diag(Dy),full_matrices=False)
    salience_x = Vx.T @ Umix
    salience_y = Vy.T @ Vmix
    del Vx, Vy
    Dpct = Dmix*100//sum(Dmix)
    
    # -------------permutation test-----------------
    Dperm = np.zeros([Nperm,Dmix.size])
    for ii in range(Nperm):
        iperm = np.random.permutation(nsub)
        # keep the unpermuted Dmix intact: store the permuted spectrum under a different name
        _, Dmix_perm, _ = np.linalg.svd(np.diag(Dx.T) @ Ux[iperm,:].T @ Uy @ np.diag(Dy),full_matrices=False)
        Dperm[ii,:] = Dmix_perm.T

    pSig = np.mean(((np.tile(Dmix,(Nperm,1)) > Dperm)*1),0)
    iSig = np.where(pSig >= 1-pval)[0]
    
    # discarding non-significant components
    salience_x = salience_x[:,iSig]
    salience_y = salience_y[:,iSig]
    
    
    #------------------------------------------------
    
    latent_x = X @ salience_x
    latent_y = Y @ salience_y
        
    # -------------bootstrap-----------------
    
    M_salience_x = np.zeros(salience_x.shape)
    M_salience_y = np.zeros(salience_y.shape)
    R_salience_x = np.zeros(salience_x.shape) 
    R_salience_y = np.zeros(salience_y.shape)
   
        
    for b in range(Nbs):
        
        bsIdx= np.random.choice(range(nsub), size=nsub, replace=True)
        bUx, bDx, bVx = np.linalg.svd(X[bsIdx,:],full_matrices=False)
        bUy, bDy, bVy = np.linalg.svd(Y[bsIdx,:],full_matrices=False)
        bUmix, bDmix, bVmix = np.linalg.svd(np.diag(bDx.T) @ bUx.T @ bUy @ np.diag(bDy),full_matrices=False)
        bsalience_x = bVx.T @ bUmix
        bsalience_y = bVy.T @ bVmix
        
        if pMatch == 1 :
            ind, sgn = eigen_sort(salience_x,bsalience_x)
        else :
            ind, sgn = eigen_sort(salience_y,bsalience_y)
            
        bsalience_x = bsalience_x[:,ind] @ np.diag(sgn)
        bsalience_y = bsalience_y[:,ind] @ np.diag(sgn)
        
        M_salience_x = M_salience_x + bsalience_x
        M_salience_y = M_salience_y + bsalience_y
        R_salience_x = R_salience_x + np.power(bsalience_x,2)
        R_salience_y = R_salience_y + np.power(bsalience_y,2)
        
    mu_salience_x = M_salience_x/Nbs
    mu_salience_y = M_salience_y/Nbs
    var_salience_x = R_salience_x/Nbs - np.power(mu_salience_x,2)
    var_salience_y = R_salience_y/Nbs - np.power(mu_salience_y,2)
    bse_salience_x = var_salience_x *(Nbs/(Nbs-1))
    bse_salience_y = var_salience_y *(Nbs/(Nbs-1))
    zsalience_x = mu_salience_x / np.sqrt(bse_salience_x)
    zsalience_y = mu_salience_y / np.sqrt(bse_salience_y)
    
       
    return salience_x,salience_y, latent_x, latent_y, zsalience_x, zsalience_y, pSig, Dpct 
コード例 #48
0
                      full_output=0)
print(newtheta, logPosterior(newtheta, data, labels))

if newtheta.shape == (1, 3):
    print("l'ottimizzatore fa i capricci, cambio dimensioni")
    newtheta = newtheta[0]

xstar = np.reshape(np.linspace(-5, 5, 100), (100, 1))
kstar = kernel(xstar, data, newtheta, wantderiv=False, measnoise=False)
K = kernel(data, data, newtheta, wantderiv=False)
kstarstar = kernel(xstar, xstar, newtheta, wantderiv=False, measnoise=False)
kstarstar_diag = kstarstar.diagonal()

invk = np.linalg.inv(K)
mean = np.dot(kstar, np.dot(invk, labels))
var = kstarstar_diag - np.diag(np.dot(kstar, np.dot(invk, kstar.transpose())))
var = np.reshape(var, (100, 1))
#%%
# PLOTS:
xstar_p = np.squeeze(xstar)
mean_p = np.squeeze(mean)
var_p = np.squeeze(np.reshape(var, (len(xstar), 1)))
s = np.sqrt(var_p)

pl.figure()
pl.clf()

pl.gca().fill_between(xstar_p, mean_p - 2 * s, mean_p + 2 * s, color="#dddddd")
latent, = pl.plot(xstar, mean, 'r--', lw=2, label="latent")
pl.title('')
loglikelihood = logPosterior(newtheta, data, labels)
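The block above applies the standard GP posterior formulas mean = K_* K^{-1} y and var = diag(K_**) - diag(K_* K^{-1} K_*^T); the kernel and data objects come from earlier in the original file. The sketch below reproduces the same algebra with a plain RBF kernel and hypothetical 1-D data:

import numpy as np

def rbf_kernel(a, b, lengthscale=1.0, variance=1.0):
    """Squared-exponential kernel between two sets of 1-D inputs."""
    d2 = (a[:, None] - b[None, :]) ** 2
    return variance * np.exp(-0.5 * d2 / lengthscale**2)

# hypothetical training data
data = np.array([-3.0, -1.0, 0.0, 2.0])
labels = np.sin(data)
xstar = np.linspace(-5, 5, 100)

K = rbf_kernel(data, data) + 1e-8 * np.eye(len(data))   # jitter for numerical stability
kstar = rbf_kernel(xstar, data)
kstarstar_diag = np.diag(rbf_kernel(xstar, xstar))

invk = np.linalg.inv(K)
mean = np.dot(kstar, np.dot(invk, labels))
var = kstarstar_diag - np.diag(np.dot(kstar, np.dot(invk, kstar.T)))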
コード例 #49
0
ファイル: Class_Vit_Choc.py プロジェクト: nsaura/ML
    def __init__ (self, parser):
        """
        This object solves the optimization problem.
        """
#        np.random.seed(1000) ; #plt.ion()
#        if parser.cov_mod not in ['full', 'diag'] :
#            raise AttributeError("\x1b[7;1;255mcov_mod must be either diag or full\x1b[0m")
        Nx,  Nt   =   parser.Nx, parser.Nt,    
        CFL, nu   =   parser.CFL,     parser.nu
        
        L = parser.L
        dx = L/(Nx-1)
        
        dt = {"dt_v" : CFL / nu * dx**2,
              "dt_l" : CFL*dx}
        
        if dt["dt_v"] < dt["dt_l"] :
            dt = dt["dt_v"]
            print ("dt visqueux")
        else :
            dt = dt["dt_l"]
            print ("dt lineaire")
                
        fac = nu*dt/dx**2
        tf = Nt * dt
        
        r = dt / dx
        
        datapath    =   osp.abspath(parser.datapath)
        num_real    =   parser.num_real
        cpt_max_adj =   parser.cpt_max_adj
        cov_mod     =   parser.cov_mod
        g_sup_max   =   parser.g_sup_max
        itmax       =   parser.itmax
        typeJ       =   parser.typeJ
        
        self.beta_prior = np.asarray([parser.beta_prior for i in range(Nx)]) 
        
        ## Coefficient matrices for the solver
        ## Note: these matrices do not include the points where the initial conditions are defined
        ## Those cases must be handled by dedicated methods that redefine the functions A1 and A2
        
        #####
        INF1 = np.diag(np.transpose([-fac/2 for i in range(Nx-3)]), -1)
        SUP1 = np.diag(np.transpose([-fac/2 for i in range(Nx-3)]), 1) 
        A_diag1 = np.diag(np.transpose([(1 + fac) for i in range(Nx-2)])) 
        
        In1 = A_diag1 + INF1 + SUP1
        
        A1 = np.zeros((Nx,Nx))
        
        A1[0,0] = A1[-1,-1] = 1
        
        A1[1:Nx-1, 1:Nx-1] = In1
        #####
        self.A1 = A1
        
        bruits = [0.0009 * np.random.randn(Nx) for time in range(num_real)]
        self.bruits = bruits
        
        self.line_x = np.linspace(0, L, Nx)

        self.cpt_max_adj = cpt_max_adj
        self.g_sup_max = g_sup_max  
        self.itmax = itmax        

        self.num_real = num_real
        self.cov_mod = cov_mod
        
        r = dt/dx
        
        self.L ,    self.tf     =   L , tf
        self.nu,    self.CFL    =   nu, CFL
        self.dx,    self.dt     =   dx, dt        
        self.Nx,    self.Nt     =   Nx, Nt
        self.fac,   self.r      =   fac, r
        
        self.nu_str = str(self.nu).replace(".","_")
        self.CFL_str = str(self.CFL).replace(".","_")
        self.type_init = parser.type_init
        
        bool_method = dict()
        bool_written= dict()
        
        if osp.exists(datapath) == False :
            os.mkdir(datapath)
        
        bmatrice_path = osp.join(datapath, "burger_matrices")
        case_path = osp.join(bmatrice_path, "cas_%s" % typeJ)
        
        self.cov_path = osp.join(case_path, "post_cov")
        self.beta_path = osp.join(case_path, "betas")
        self.chol_path = osp.join(case_path, "cholesky")
        self.inferred_U = osp.join(case_path, "U")
        
        if osp.exists(osp.join(case_path)) == False :
            os.makedirs(case_path)
        
        if osp.exists(self.inferred_U) == False :
            os.mkdir(self.cov_path)
            os.mkdir(self.chol_path)
            os.mkdir(self.beta_path)
            os.mkdir(self.inferred_U)

        self.datapath   =   datapath
        
        self.beta_name = lambda nx, nt, nu, type_i, CFL, it : osp.join(self.beta_path,\
            "beta_Nx:{}_Nt:{}_nu:{}_".format(nx, nt, nu) + "typei:{}_CFL:{}_it:{:03}.npy".format(type_i, CFL, it))
        
        self.u_name = lambda nx, nt, nu, type_i, CFL, it : osp.join(self.inferred_U,\
            "U_Nx:{}_Nt:{}_nu:{}_".format(nx, nt, nu) + "typei:{}_CFL:{}_it:{:03}.npy".format(type_i, CFL, it))
        
        self.chol_name = lambda nx, nt, nu, type_i, CFL, it : osp.join(self.chol_path,\
            "chol_Nx:{}_Nt:{}_nu:{}_".format(nx, nt, nu) + "typei:{}_CFL:{}_it:{:03}.npy".format(type_i, CFL, it))

        self.stats_done = False    

        self.typeJ = typeJ        
                        
        self.parser = parser
コード例 #50
0
def ap(S, maxits, convits, dampfact):
		n=S.shape[0]

		#Create empty Availability and Responsibility matrix and Exemplars list
		A=np.zeros((n, n))
		R=np.zeros((n, n))
		exemplars=[]
		count=0

		#start iterations
		for m in range(0, maxits):
		      # Compute responsibilities
			Rold = R
			AS = A + S
			Y= AS.max(1)
			I= AS.argmax(1)
			for i in range(n) :
				AS[i,I[i]] = -1000000
			Y2 = AS.max(1)
			I2 = AS.argmax(1)
			temp=np.repeat(Y, n).reshape(n, n)
			R = S - temp
			for i in range(n) :
				R[i,I[i]] = S[i,I[i]]-Y2[i]
			R = (1-dampfact)*R+dampfact*Rold


			# Compute availabilities
			Aold = A
			Rp = np.maximum(R,0)
			for i in range(n) :
				Rp[i,i] = R[i,i]
			temp2=np.ones((n,1))
			temp3=Rp.sum(0)
			A = np.kron(temp2, temp3)
			A= A-Rp
			diag = np.diag(A)
			A = np.minimum(A,0)
			for i in range(n) :
				A[i,i] = diag[i]
			A = (1-dampfact)*A + dampfact*Aold


			tempexemplars= []
			for i in range(0, n):
				if (R[i,i]+A[i,i])>0:
					tempexemplars.append(i)

			if(tempexemplars==exemplars):
				count=count+1
				if(count==convits):
					break
			else:
				count=0
				exemplars=list(tempexemplars)

		#Assigning datapoints to Exemplar
		assignment= np.zeros(n)

		for i in range(0,n):
			closest=0;
			currentbest=-1000000
			for j in range(0, len(exemplars)):
				if S[i,exemplars[j]]>currentbest:
					currentbest=S[i,exemplars[j]]
					closest=exemplars[j]
				if i==exemplars[j]:
					closest=exemplars[j]
					break
			assignment[i]=closest


		return assignment
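A quick way to exercise ap() on toy data, using negative squared Euclidean distances as similarities and the median similarity as the preference on the diagonal (the parameter values below are illustrative):

import numpy as np

rng = np.random.default_rng(0)
pts = np.vstack([rng.normal(0.0, 0.2, (5, 2)), rng.normal(3.0, 0.2, (5, 2))])

# similarity = negative squared Euclidean distance
S = -np.sum((pts[:, None, :] - pts[None, :, :]) ** 2, axis=2)
np.fill_diagonal(S, np.median(S))

labels = ap(S, maxits=200, convits=20, dampfact=0.9)
print(labels)  # two groups of exemplars are expected for two well-separated clusters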
コード例 #51
0
def admm(X, y, max_iter=5000):
    # solve the SVM dual with ADMM (alternating direction method of multipliers)
    m, n = X.shape
    X = np.column_stack((X, np.ones((m, 1))))
    y = y.astype(np.float64)
    data_num = len(y)
    C = 1.0
    kernel = np.dot(X, np.transpose(X))
    p = np.matrix(np.multiply(kernel,np.outer(y, y))) + np.diag(np.ones(data_num, np.float64)) * .5/C
    e = np.matrix(np.ones([data_num, 1], np.float64))

    bounds = (0, np.inf)    


    low, up = bounds    
    x = np.ones((m,1))
    tau = 1.618
    sigma = 1
    
    # initial 
    u = np.ones((m, 1))
    t = x
    A = p + sigma * np.eye(m)
    I = np.eye(m)
    invA = cg(A, I)
    for it in range(max_iter):
        # update x
        b = e + u + sigma * t
        x = invA * b
        
        # update y
        t = x - (1/sigma)*u
        t[t < low] = low
        t[t > up] = up
                    
        # update u
        u = u - tau*sigma*(x-t)

        dual = -(0.5*x.T*(p*x) - e.T*x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)      
        tmp = np.maximum(1-np.multiply(y1, X*w),0)
        primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        primal = primal.item()

        # stop criteria            
        if np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)) < 1e-12:
            break

        # print(t, np.linalg.norm(gradient))
        # print(np.min(x), np.max(x))
        # print(np.sum(x < -1e-4), np.sum(x>1+1e-4))
        # print(np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)))

    y1 = np.reshape(y, (-1, 1))
    alpha1 = x
    lambda1 = np.multiply(y1,alpha1)   
    w = np.dot(X.T, lambda1)
    w = np.array(w).reshape(-1)
    b = w[n]
    w = w[0:n]

    return w, b
コード例 #52
0
def predict(A,data,true_data=None,split=False,seed=1, silent=True): 
    #This is a basic OLS solver using matrix inversion to solve
    #the normal equations A^T A b = A^T y. The argument A is the problem's design
    #matrix, data is the vector corresponding to y.
    #If available, true_data is noise-free data that can be used for testing
    #purposes.
    #The split argument determines if the function should call train_test_split
    #from the sklearn package to split the data into testing and training sets.
    #The silent argument just determines if the function should print to
    #screen what it does. Implemented for debugging.
    
    #Determine whether or no to split data into training/testing sets
    if split:
        #Splits data using train_test _split from sklearn
        X_train, X_test, y_train, y_test = train_test_split(
                A,data,test_size=0.20,random_state=seed,shuffle=True)
        #Find betas using matrix (pseudo)inversion to solve normal
        #equations
        beta = (np.linalg.pinv(X_train.T@X_train)@X_train.T).dot(y_train)
        ytilde = X_train @ beta

        #Overwrites the y_test vector containing noisy data
        #with the real function values/noiseless data
        tmp = train_test_split(A,true_data, test_size=0.20,
                               random_state=seed,shuffle=True)
        y_train_true = tmp[2]
        y_test_true = tmp[3]
        ypredict = X_test @ beta
        msetrain = MSE(y_train_true,ytilde)
        msetest = MSE(y_test_true,ypredict)
        r2train = R2(y_train_true,ytilde)
        r2test = R2(y_test_true,ypredict)
        if not silent:
            #If one wants to see the metrics as they are calculated,
            #set silent=False
            print("MSE value on training set:", msetrain)
            print("R2 value on training set:", r2train)
            print("MSE value on testing set:", msetest)
            print("R2 value on testing set:", r2test)
        
        #Calculates an unbiased estimator of sigma^2, the variance
        #of the noise
        #Based on training predictions
        delta =y_train_true-ytilde
        sigma_squared = delta.dot(delta)/(len(y_train_true)-len(beta)-1)
        #Estimate variance of betas in training set
        M = np.diag(np.linalg.pinv(X_train.T@X_train))
        var_beta = sigma_squared*np.sqrt(M)
        
        #Return a dictionary of metrics for training and
        #testing predictions
        return {"beta":beta, "msetest":msetest,"R2":r2test,"var":var_beta
                ,"R2train":r2train,"msetrain":msetrain}
    else:
        #If split argument is not true then train_test_split is not employed
        #and all data is used both as testing and training data.
        
        #Find betas using matrix (pseudo)inversion to solve normal
        #equations
        beta = (np.linalg.pinv(A.T@A)@A.T).dot(data)
        ytilde = A @ beta
        msetest = MSE(true_data,ytilde)
        r2 = R2(true_data,ytilde)
        #Calculates an unbiased estimator of sigma^2, the variance
        #of the noise
        delta = true_data-ytilde
        sigma_squared = delta.dot(delta)/(len(true_data)-len(beta)-1)
        #Estimate variance of betas
        M = np.diag(np.linalg.pinv(A.T@A))
        var_beta = sigma_squared*np.sqrt(M)
        if not silent:
            print("MSE value, no training split:", msetest)
            print("R2 value, no training split:", r2)
        return {"beta":beta, "msetest":msetest,"R2":r2,"var":var_beta}
コード例 #53
0
XF = Xtrain
#vars
yf = ytrain.as_matrix()
XF = XF.as_matrix()
XFT = np.transpose(XF)
#coefficient calculation
XTXi = np.matrix(np.dot(XFT, XF)).getI()  #(XTX)-1
coef = np.dot(XTXi, XFT)  #(XTX)-1XT
coef = np.dot(coef, yf)  #B = (XTX)-1XTy
coef = np.transpose(coef)

yhat = np.dot(XF, coef)
yhat = np.transpose(yhat)
#yf
var = yf - yhat
#print XF.shape
N = XF.shape[0]
p = XF.shape[1]
std = np.sqrt((1. / (N - p - 1)) * np.sum(np.square(var)))

print(std)

#print np.matrix(np.diag(XTXi))
zscore = coef / np.transpose(np.matrix(np.sqrt(np.diag(XTXi))))
zscore = zscore / std
#print zscore
#print zscore.shape

print(coef)
print(zscore)
コード例 #54
0
ファイル: gpc.py プロジェクト: zhangYongHong/scikit-learn
    def log_marginal_likelihood(self, theta=None, eval_gradient=False):
        """Returns log-marginal likelihood of theta for training data.

        Parameters
        ----------
        theta : array-like, shape = (n_kernel_params,) or None
            Kernel hyperparameters for which the log-marginal likelihood is
            evaluated. If None, the precomputed log_marginal_likelihood
            of ``self.kernel_.theta`` is returned.

        eval_gradient : bool, default: False
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta is returned
            additionally. If True, theta must not be None.

        Returns
        -------
        log_likelihood : float
            Log-marginal likelihood of theta for training data.

        log_likelihood_gradient : array, shape = (n_kernel_params,), optional
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta.
            Only returned when eval_gradient is True.
        """
        if theta is None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated for theta!=None")
            return self.log_marginal_likelihood_value_

        kernel = self.kernel_.clone_with_theta(theta)

        if eval_gradient:
            K, K_gradient = kernel(self.X_train_, eval_gradient=True)
        else:
            K = kernel(self.X_train_)

        # Compute log-marginal-likelihood Z and also store some temporaries
        # which can be reused for computing Z's gradient
        Z, (pi, W_sr, L, b, a) = \
            self._posterior_mode(K, return_temporaries=True)

        if not eval_gradient:
            return Z

        # Compute gradient based on Algorithm 5.1 of GPML
        d_Z = np.empty(theta.shape[0])
        # XXX: Get rid of the np.diag() in the next line
        R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr))  # Line 7
        C = solve(L, W_sr[:, np.newaxis] * K)  # Line 8
        # Line 9: (use einsum to compute np.diag(C.T.dot(C))))
        s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
            * (pi * (1 - pi) * (1 - 2 * pi))  # third derivative

        for j in range(d_Z.shape[0]):
            C = K_gradient[:, :, j]  # Line 11
            # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
            s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())

            b = C.dot(self.y_train_ - pi)  # Line 13
            s_3 = b - K.dot(R.dot(b))  # Line 14

            d_Z[j] = s_1 + s_2.T.dot(s_3)  # Line 15

        return Z, d_Z
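For context, this is scikit-learn's internal method; in user code the log-marginal likelihood is usually queried on a fitted GaussianProcessClassifier, roughly like this (assuming a reasonably recent scikit-learn):

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.random.RandomState(0).rand(30, 1)
y = (X[:, 0] > 0.5).astype(int)

gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0)).fit(X, y)
lml, grad = gpc.log_marginal_likelihood(gpc.kernel_.theta, eval_gradient=True)
print(lml, grad)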
コード例 #55
0
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
                         potential_string =  'harmonic_potential', print_steps=True,
                         save_data=True, file_name=None, relevant_info=None,
                         plot=True, save_plot=True, show_plot=True):
    """
    Uso:    corre algoritmo matrix squaring iterativamente (N_iter veces). En la primera
            iteración se usa una matriz densidad en aproximación de Trotter a temperatura
            inversa beta_ini = beta_fin * 2**(-N_iter) para potencial dado por potential;
            en las siguientes iteraciones se usa matriz densidad generada por la iteración 
            inmediatamente anterior. Además ésta función guarda datos de pi(x;beta) vs. x
            en archivo de texto y grafica pi(x;beta) comparándolo con teoría para el oscilador 
            armónico cuántico.

    Recibe:
        x_max: float        ->  los valores de x estarán en el intervalo (-x_max,x_max).
        nx: int             ->  número de valores de x considerados.
        N_iter: int         ->  número de iteraciones del algoritmo matrix squaring.
        beta_ini: float     ->  valor de inverso de temperatura que queremos tener al final de
                                aplicar el algoritmo matrix squaring iterativamente. 
        potential: func     ->  potencial de interacción usado en aproximación de trotter. Debe 
                                ser función de x.
        potential_string: str   ->  nombre del potencial (con éste nombramos los archivos que
                                    se generan).
        print_steps: bool   ->  decide si imprime los pasos del algoritmo matrix squaring.
        save_data: bool     ->  decide si guarda los datos en archivo .csv.
        plot: bool          ->  decide si grafica.
        save_plot: bool     ->  decide si guarda la figura.
        show_plot: bool     ->  decide si muestra la figura en pantalla. 
    
    Devuelve:
        rho: numpy array, shape=(nx,nx)     ->  matriz densidad de estado rho a temperatura 
                                                inversa igual a beta_fin.
        trace_rho: float                    ->  traza de la matriz densidad a temperatura inversa
                                                igual a beta_fin. Por la definición que tomamos
                                                de "rho", ésta es equivalente a la función 
                                                partición en dicha temperatura.
        grid_x: numpy array, shape=(nx,)    ->  valores de x en los que está evaluada rho.
    """
    # Compute beta_ini from the beta_fin and N_iter values given as input
    beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho with the Trotter approximation
    rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    # Approximate rho by iterating matrix squaring N_iter times.
    rho, trace_rho, beta_fin_2 = density_matrix_squaring(   rho, grid_x, N_iter, 
                                                            beta_ini, print_steps   )
    print(  '----------------------------------------------------------------' +
            '--------------------------------------------------------\n'
            u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f'%(beta_ini, beta_fin_2) +
            u'   N_iter = %d   Z(beta_fin) = Tr(rho(beta_fin)) = %.3E'%(N_iter,trace_rho))
    # Normalize rho to 1 and compute probability densities at the grid_x values.
    rho_normalized = np.copy(rho)/trace_rho
    x_weights = np.diag(rho_normalized)
    # Save data to a .csv file.
    script_dir = os.path.dirname(os.path.abspath(__file__)) # full path of this script
    if save_data==True:
        # Name of the .csv file where the pi(x;beta_fin) values are stored.
        if file_name is None:
            csv_file_name = script_dir+u'/pi_x-ms-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.csv'\
                                            %(potential_string,beta_fin,x_max,nx,N_iter)
        else:
            csv_file_name = script_dir + '/'+ file_name 
        # Relevant information to add as a comment to the csv file.
        if relevant_info is None:
            relevant_info = [   'pi(x;beta_fin) computed using matrix squaring algorithm and' + \
                                ' Trotter approximation. Parameters:',
                                u'%s   x_max = %.3f   nx = %d   '%(potential_string,x_max,nx) + \
                                u'N_iter = %d   beta_ini = %.3f   '%(N_iter,beta_ini,) + \
                                u'beta_fin = %.3f'%beta_fin ]
        # Save the pi(x;beta_fin) values to the csv file.
        pi_x_data = [grid_x.copy(),x_weights.copy()]
        pi_x_data_headers = ['position_x','prob_density']
        pi_x_data = save_csv(pi_x_data,pi_x_data_headers,csv_file_name,relevant_info,print_data=0)

    # Plot and compare with theory
    if plot == True:
        plt.figure(figsize=(8,5))
        plt.plot(grid_x, x_weights, label = 'Matrix squaring +\nTrotter formula.\n$N=%d$ iterations\n$dx=%.3E$'%(N_iter,dx))
        plt.plot(grid_x, QHO_canonical_ensemble(grid_x,beta_fin), label=u'QHO theoretical value')
        plt.xlabel(u'x')
        plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
        plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
        plt.tight_layout()
        if save_plot==True:
            if file_name is None:
                plot_file_name = script_dir+u'/pi_x-ms-plot-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.eps'%(potential_string,beta_fin,x_max,nx,N_iter)
            else:
                plot_file_name = script_dir+u'/pi_x-ms-plot-'+file_name+'.eps'
            plt.savefig(plot_file_name)
        if show_plot==True:
            plt.show()
        plt.close()
    return rho, trace_rho, grid_x
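The core idea described in the docstring, build a Trotter density matrix at a small beta and square it repeatedly to double beta, can be sketched in a few self-contained lines for the harmonic potential (an illustration, not the project's rho_trotter / density_matrix_squaring helpers):

import numpy as np

def rho_free(x, xp, beta):
    """Free-particle density matrix (hbar = m = 1)."""
    return np.exp(-(x - xp) ** 2 / (2.0 * beta)) / np.sqrt(2.0 * np.pi * beta)

x_max, nx, N_iter, beta_fin = 5.0, 201, 7, 4.0
beta = beta_fin * 2.0 ** (-N_iter)
grid_x = np.linspace(-x_max, x_max, nx)
dx = grid_x[1] - grid_x[0]

# Trotter approximation: exp(-beta*V/2) * rho_free * exp(-beta*V/2), with V(x) = x**2 / 2
V = 0.5 * grid_x ** 2
rho = (np.exp(-0.5 * beta * V)[:, None]
       * rho_free(grid_x[:, None], grid_x[None, :], beta)
       * np.exp(-0.5 * beta * V)[None, :])

# matrix squaring: rho(2*beta) = dx * rho(beta) @ rho(beta)
for _ in range(N_iter):
    rho = dx * (rho @ rho)
    beta *= 2.0

trace_rho = np.trace(rho)
x_weights = np.diag(rho) / trace_rho   # normalized weights over grid_x, as in the function above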
コード例 #56
0
    def m_func(self, X_train, y):
        # Extract the number of samples and features from the training set
        n_samples, n_features = X_train.shape

        # Allocate an n*n matrix to store all computed kernel values K(i, j)
        self.K = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                if self.kernel == 'polynomial':
                    self.K[i, j] = polynomial_kernel(X_train[i], X_train[j],self.P)
                elif self.kernel == 'gaussian':
                    self.K[i, j] = gaussian_kernel(X_train[i], X_train[j], self.sigma)
                else:
                    self.K[i, j] = linear_kernel(X_train[i], X_train[j])

            # print(K[i,j])

        X_train = np.asarray(X_train)

        # Second kernel matrix K1, used later for the w_phi computation
        # (the original refilled self.K here, leaving K1 as zeros)
        K1 = np.zeros((n_samples, n_samples))
        for i in range(n_samples):
            for j in range(n_samples):
                if self.kernel == 'polynomial':
                    K1[i, j] = polynomial_kernel(X_train[i], X_train[j], self.P)
                elif self.kernel == 'gaussian':
                    K1[i, j] = gaussian_kernel(X_train[i], X_train[j], self.sigma)
                else:
                    K1[i, j] = linear_kernel(X_train[i], X_train[j])
            # print(K[i,j])
        # print(K1.shape)

        # P corresponds to y_i * y_j * phi(x_i) * phi(x_j) in the dual formulation
        P = cvxopt.matrix(np.outer(y, y) * self.K)

        # q is a vector of -1's with length equal to the number of training samples
        q = cvxopt.matrix(np.ones(n_samples) * -1)
        
        #equality constraints
        # A turns the column vector y into a row vector
        A = cvxopt.matrix(y, (1, n_samples))
        A = matrix(A, (1, n_samples), 'd')  # changes done
        # b = [0.0]
        b = cvxopt.matrix(0.0)
        #print(P,q,A,b)
        
        #inequality constraints
        if self.C is None:
            # G is an n*n diagonal matrix with -1 on the diagonal
            G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
            # h = [0,0,0,...,0]
            h = cvxopt.matrix(np.zeros(n_samples))

        else:
            # tmp1 is an n*n diagonal matrix with -1 on the diagonal
            tmp1 = np.diag(np.ones(n_samples) * -1)
            # tmp2 is an n*n identity matrix
            tmp2 = np.identity(n_samples)
            # G is the (2n x n) vertical stack of tmp1 and tmp2
            G = cvxopt.matrix(np.vstack((tmp1, tmp2)))

            # h is a (2n x 1) column vector: zeros in the upper half, C in the lower half
            tmp1 = np.zeros(n_samples)
            tmp2 = np.ones(n_samples) * self.C
            h = cvxopt.matrix(np.hstack((tmp1, tmp2)))

        # Solve the QP problem via cvxopt (convex optimization)
        solution = cvxopt.solvers.qp(P, q, G, h, A, b)
        # print(solution['status'])
        # Lagrange multipliers
        # Flatten solution['x'] into a vector; a holds the Lagrange multipliers (alphas)
        a = np.ravel(solution['x'])
        #print(a)
        # a_org is an identical copy of a, kept before filtering
        a_org = np.ravel(solution['x'])
        # Support vectors have non zero lagrange multipliers
        # The alphas never become exactly zero, so use a threshold: entries above 1e-5 are treated as support vectors
        # sv is a boolean mask over all samples
        sv = a > 1e-5
        # print(sv.shape)

        #         print(a)
        #         print(sv)
        #        print(a[sv])
        # Keep only the support vectors selected by sv
        ind = np.arange(len(a))[sv]
        self.a_org = a
        self.a = a[sv]
        self.sv = X_train[sv]
        self.sv_y = y[sv]
        self.sv_yorg = y
        X_train = np.asarray(X_train)
        b = 0

        # Compute the bias term b from the support vectors
        for n in range(len(self.a)):
            b += self.sv_y[n]
            b -= np.sum(self.a * self.sv_y * self.K[ind[n], sv])
        b /= len(self.a)
        # print(self.a_org[1])
        # print(self.a_org.shape,self.sv_yorg.shape,K.shape)

        w_phi = 0
        total = 0

        # Roughly corresponds to Eq. (8) in the paper: the weight update formula
        for n in range(len(self.a_org)):
            #print(self.a_org[n] * self.sv_yorg[n])
            w_phi = self.a_org[n] * self.sv_yorg[n] * K1[n]


        self.d_hyp = np.zeros(n_samples)
        # Corresponds to Eq. (21): compute d
        for n in range(len(self.a_org)):
            self.d_hyp += self.sv_yorg[n] * (w_phi + b)
        func = np.zeros((n_samples))
        func = np.asarray(func)
        typ = 2

        # Corresponds to Eq. (22): compute the fuzzy membership
        if (typ == 1):
            for i in range(n_samples):
                func[i] = 1 - (self.d_hyp[i] / (np.amax(self.d_hyp[i]) + 0.000001))
        beta = 0.8
        # Corresponds to Eq. (23)
        if (typ == 2):
            for i in range(n_samples):
                func[i] = 2 / (1 + beta * self.d_hyp[i])

        # The scaling ratios used here (r_max, r_min) are unclear and may need revisiting
        r_max = 700 / 700
        r_min = 1
        #print(func)
        self.m = func[0:701] * r_min
        #print(self.m)
        self.m = np.append(self.m, func[701:1401] * r_max)
コード例 #57
0
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 16:48:04 2018

@author: Thinkpad
"""
import numpy as np
#Numpy:
#
#1. How to add a border (filled with 0's) around an existing array?
a1 = np.pad(np.ones((2,2)),1,'constant')
#2. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal  
a2 = np.diag(np.arange(4) + 1, -1)
#3. Create a 8x8 matrix and fill it with a checkerboard pattern 
a3 = np.zeros((8,8))
a3[::2,::2] = 1
a3[1::2,1::2] = 1
#4. Consider a (6,7,8) shape array, what is the index (x,y,z) of the 100th element 
a4 = np.unravel_index(100, (6,7,8))
#>>> np.unravel_index([22, 41, 37], (7,6))
#(array([3, 6, 6]), array([4, 5, 1]))
#>>> np.unravel_index([31, 41, 13], (7,6), order='F')
#(array([3, 6, 6]), array([4, 5, 1]))

#5. Create a checkerboard 8x8 matrix using the tile function
a5 = np.tile(a3, (2,2))
#6. How to add a border (filled with 0's) around an existing array? 
a6 = a1
#7. What is the result of the following expression? 
''' ? '''
#8. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal 
コード例 #58
0
    def loss(self, X, y=None, reg=0.0):
        """
    Compute the loss and gradients for a two layer fully connected neural
    network.

    Inputs:
    - X: Input data of shape (N, D). Each X[i] is a training sample.
    - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
      an integer in the range 0 <= y[i] < C. This parameter is optional; if it
      is not passed then we only return scores, and if it is passed then we
      instead return the loss and gradients.
    - reg: Regularization strength.

    Returns:
    If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
    the score for class c on input X[i].

    If y is not None, instead return a tuple of:
    - loss: Loss (data loss and regularization loss) for this batch of training
      samples.
    - grads: Dictionary mapping parameter names to gradients of those parameters
      with respect to the loss function; has the same keys as self.params.
    """
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape

        # Compute the forward pass
        scores = None
        #############################################################################
        # TODO: Perform the forward pass, computing the class scores for the input. #
        # Store the result in the scores variable, which should be an array of      #
        # shape (N, C).                                                             #
        #############################################################################
        a = np.maximum(X.dot(W1) + b1, 0)
        scores = np.dot(a, W2) + b2
        #############################################################################
        #                              END OF YOUR CODE                             #
        #############################################################################

        # If the targets are not given then jump out, we're done
        if y is None:
            return scores

        # Compute the loss
        loss = None
        #############################################################################
        # TODO: Finish the forward pass, and compute the loss. This should include  #
        # both the data loss and L2 regularization for W1 and W2. Store the result  #
        # in the variable loss, which should be a scalar. Use the Softmax           #
        # classifier loss.                                                          #
        #############################################################################
        exp_scores = np.exp(scores)
        exp_sum = np.sum(exp_scores, axis=1)
        exp_correct = exp_scores[range(N), y[range(N)]]
        loss = np.sum(-np.log(exp_correct / exp_sum)) / N + reg * (
            np.sum(W1 * W1) + np.sum(W2 * W2))
        #############################################################################
        #                              END OF YOUR CODE                             #
        #############################################################################

        # Backward pass: compute gradients
        grads = {}
        #############################################################################
        # TODO: Compute the backward pass, computing the derivatives of the weights #
        # and biases. Store the results in the grads dictionary. For example,       #
        # grads['W1'] should store the gradient on W1, and be a matrix of same size #
        #############################################################################
        r = np.dot(np.diag(1 / exp_sum), exp_scores)
        r[range(N), y[range(N)]] -= 1
        db2 = np.sum(r, axis=0) / N
        dW2 = np.dot(a.T, r) / N + 2 * reg * W2
        t = (W2.dot(r.T)).T
        t[a <= 0] = 0
        db1 = np.sum(t, axis=0) / N
        dW1 = np.dot(X.T, t) / N + 2 * reg * W1
        grads = {'W1': dW1, 'W2': dW2, 'b1': db1, 'b2': db2}
        #############################################################################
        #                              END OF YOUR CODE                             #
        #############################################################################

        return loss, grads
コード例 #59
0
    ax.set_xlabel(r'$x_0$', fontsize=18)
    ax.set_ylabel(r'$x_1$', fontsize=18)
    ax.legend(fontsize=16)
    ax.grid(True)

    plt.show()

    # Compute W
    W_rbf = np.zeros((nb_samples, nb_samples))

    for i in range(nb_samples):
        for j in range(nb_samples):
            W_rbf[i, j] = rbf(X[i], X[j])

    # Compute D and its inverse
    D_rbf = np.diag(np.sum(W_rbf, axis=1))
    D_rbf_inv = np.linalg.inv(D_rbf)

    # Perform the label propagation
    Yt = Y.copy()
    Y_prev = np.zeros((nb_samples, ))
    iterations = 0

    while np.linalg.norm(Yt - Y_prev, ord=1) > tolerance:
        P = np.dot(D_rbf_inv, W_rbf)
        Y_prev = Yt.copy()
        Yt = np.dot(P, Yt)
        #Yt[0:nb_samples - nb_unlabeled] = Y[0:nb_samples - nb_unlabeled]

    Y_final = np.sign(Yt)
コード例 #60
0
 def gradient_function(self, graph, f):
     links = int(np.max(graph[:, 0]) + 1)
     g = graph.dot(np.diag([1., 1., 1., 1., 1 / 2., 1 / 3., 1 / 4.,
                            1 / 5.]))
     x = np.power(f.reshape((links, 1)), np.array([1, 2, 3, 4, 5]))
     return np.sum(np.einsum('ij,ij->i', x, g[:, 3:]))