def ToStateSpace(M, K, G, T_u, T_w):
    n = np.shape(M)[0]
    Minv = np.linalg.inv(M)
    A = np.block([[np.zeros((n, n)), np.identity(n)],
                  [-Minv @ K, -Minv @ G]])
    # Input matrix, assuming the second-order form
    # M q'' + G q' + K q = T_u u + T_w w, so inputs enter the velocity states.
    B = np.block([[np.zeros(np.shape(T_u)), np.zeros(np.shape(T_w))],
                  [Minv @ T_u, Minv @ T_w]])
    return A, B
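A minimal usage sketch under the input convention assumed in the comments above (a hypothetical 1-DOF mass-spring-damper):

import numpy as np

M = np.array([[2.0]]); K = np.array([[8.0]]); G = np.array([[0.5]])
T_u = np.array([[1.0]]); T_w = np.array([[1.0]])
A, B = ToStateSpace(M, K, G, T_u, T_w)
# A == [[0, 1], [-4, -0.25]], B == [[0, 0], [0.5, 0.5]]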
Example #2
def _getDirec(P, M, u=1, v=0):
    # Unproject the clip-space points (u, v, +1) and (u, v, -1) and turn
    # their normalized difference into a view-ray direction.
    direc0 = np.dot(np.linalg.inv(P), [u, v, 1, 1])
    direc0 = direc0 / direc0[-1]
    orig0 = np.dot(np.linalg.inv(P), [u, v, -1, 1])
    direc0 = direc0 - orig0
    direc0 = direc0 / np.linalg.norm(direc0)
    return np.dot(np.linalg.inv(M), direc0)
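For orientation, what this returns for identity projection and modelview matrices (my reading: the difference of the two unprojected points becomes a unit view ray):

import numpy as np

P = np.eye(4); M = np.eye(4)
d = _getDirec(P, M)  # u=1, v=0 by default
# d == [0, 0, 1, 0]: a unit ray along +z in homogeneous coordinates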
Example #4
def polyfitu(x, y, dy, N):
    """Weighted least-squares polynomial fit of degree N, with 1-sigma
    uncertainties dy on y; returns the coefficients p (lowest order first)
    and their propagated uncertainties dp."""
    n = len(y)  # number of data points
    dysq = dy**2  # Speed up computation
    wtsq = 1. / dysq  # Speed up computation

    # Form the weighted moment vector Sigma_y[k] = sum_i y_i x_i^k w_i
    kk = np.arange(N + 1).reshape(-1, 1)
    Sigma_y = np.sum(y * x**kk * wtsq, axis=1)

    # Weighted normal-equation matrix Sigma_xx[j, k] = sum_i x_i^(j+k) w_i
    Sigma_xx = np.zeros((N + 1, N + 1))
    for k in range(N + 1):
        for j in range(k, N + 1):
            Sigma_xx[j, k] = np.sum(x**k * x**j * wtsq)
            Sigma_xx[k, j] = Sigma_xx[j, k]

    Sigma_xx_inv = np.linalg.inv(Sigma_xx)

    p = Sigma_xx_inv @ Sigma_y  # polynomial coefficients

    # Propagate the y uncertainties through the linear solve
    xvector = x**kk * wtsq            # shape (N+1, n)
    dpdy = Sigma_xx_inv @ xvector     # dp_k / dy_i

    CM = np.zeros((N + 1, N + 1))
    for k in range(N + 1):
        for j in range(k, N + 1):
            CM[j, k] = np.sum(dpdy[k, :] * dpdy[j, :] * dysq)
            CM[k, j] = CM[j, k]

    dp = np.sqrt(np.diag(CM))
    return p, dp
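A quick sanity check of the fit and its uncertainties (a sketch; the coefficients come back lowest order first):

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
dy = 0.3 * np.ones_like(x)
y = 1.5 + 2.0 * x + rng.normal(0, 0.3, x.size)

p, dp = polyfitu(x, y, dy, 1)
# p is approximately [1.5, 2.0] (intercept, slope); dp gives their 1-sigma errors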
Example #5
def lfsrsolve(v, n):
    """Given a guess n for the length of the recurrence that generates
    the binary vector v, this function returns the coefficients of the
    recurrence."""
    v = v[:]

    vln = len(v)

    if vln < 2 * n:
        raise ValueError('The vector v needs to be at least length 2n')

    M = np.array(circmat(v, n))
    Mdet = np.linalg.det(M)

    x = v[n:2 * n]  # the n symbols following the first window

    Minv = np.linalg.inv(M)
    Minv = np.mod(np.round(Minv * Mdet), 2)
    # A note: technically, the round() function should never show up, but
    # since the inverse is computed in double-precision arithmetic we need
    # to bring the result back to integer values so we can perform a
    # meaningful mod operation. As long as this routine is not used on huge
    # examples, it should be OK.

    y = np.mod(Minv @ x, 2)
    return y
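circmat is not shown in this snippet; my reading is that it builds the n x n matrix of shifted windows of v, M[i][j] = v[i + j]. A usage sketch under that assumption (hypothetical helper), recovering the taps of x_{k+2} = x_k + x_{k+1} (mod 2):

import numpy as np

def circmat(v, n):
    # assumed helper: row i is the window v[i], ..., v[i+n-1]
    return [[v[i + j] for j in range(n)] for i in range(n)]

v = [1, 1, 0, 1, 1, 0, 1, 1]  # generated by x_{k+2} = x_k + x_{k+1} (mod 2)
print(lfsrsolve(v, 2))        # -> [1. 1.]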
Example #6
def red_obs(sys, T, poles):
    """Reduced order observer of the system sys
    Call:
    obs=red_obs(sys,T,poles)
    Parameters
    ----------
    sys : System in State Space form
    T: Complement matrix
    poles: desired observer poles
    Returns
    -------
    obs: ss
    Reduced order Observer
    """
    if isinstance(sys, TransferFunction):
        raise TypeError("System must be in state space form")
    a = np.mat(sys.A)
    b = np.mat(sys.B)
    c = np.mat(sys.C)
    d = np.mat(sys.D)
    T = np.mat(T)
    P = np.mat(np.vstack((c, T)))
    invP = np.linalg.inv(P)
    AA = P * a * invP
    ny = np.shape(c)[0]
    nx = np.shape(a)[0]
    nu = np.shape(b)[1]

    A11 = AA[0:ny, 0:ny]
    A12 = AA[0:ny, ny:nx]
    A21 = AA[ny:nx, 0:ny]
    A22 = AA[ny:nx, ny:nx]

    L1 = place(A22.T, A12.T, poles)
    L1 = np.mat(L1).T

    nn = nx - ny

    tmp1 = np.mat(np.hstack((-L1, np.eye(nn, nn))))
    tmp2 = np.mat(np.vstack((np.zeros((ny, nn)), np.eye(nn, nn))))
    Ar = tmp1 * P * a * invP * tmp2

    tmp3 = np.vstack((np.eye(ny, ny), L1))
    tmp3 = np.mat(np.hstack((P * b, P * a * invP * tmp3)))
    tmp4 = np.hstack((np.eye(nu, nu), np.zeros((nu, ny))))
    tmp5 = np.hstack((-d, np.eye(ny, ny)))
    tmp4 = np.mat(np.vstack((tmp4, tmp5)))

    Br = tmp1 * tmp3 * tmp4

    Cr = invP * tmp2

    tmp5 = np.hstack((np.zeros((ny, nu)), np.eye(ny, ny)))
    tmp6 = np.hstack((np.zeros((nn, nu)), L1))
    tmp5 = np.mat(np.vstack((tmp5, tmp6)))
    Dr = invP * tmp5 * tmp4

    obs = StateSpace(Ar, Br, Cr, Dr, sys.dt)
    return obs
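A sketch of a call, assuming the python-control package that the snippet's place/StateSpace/TransferFunction names point to: a first-order observer for the unmeasured velocity of a double integrator.

import numpy as np
from control import StateSpace

sys = StateSpace([[0, 1], [0, 0]], [[0], [1]], [[1, 0]], [[0]])
T = np.array([[0, 1]])         # complements C so that P = [[C], [T]] is invertible
obs = red_obs(sys, T, [-5.0])  # observer pole placed at -5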
Example #8
def invMassMatrix(obj):
    """Returns the inverse of obj's generalized mass matrix

      [H 0 ]-1
      [0 mI]

    about the origin."""
    try:
        import numpy
    except ImportError:
        raise RuntimeError("invMassMatrix(obj) needs numpy")
    Hinv = numpy.zeros((6, 6))
    if obj is None or isinstance(obj, TerrainModel):
        #infinite inertia
        return Hinv
    if isinstance(obj, RobotModel):
        return obj.getMassMatrixInv()
    m = obj.getMass()
    minv = 1.0 / m.mass
    Hinv[3, 3] = Hinv[4, 4] = Hinv[5, 5] = minv
    #offset the inertia matrix about the COM
    H = numpy.zeros((3, 3))
    H[0, :] = numpy.array(m.inertia[0:3])
    H[1, :] = numpy.array(m.inertia[3:6])
    H[2, :] = numpy.array(m.inertia[6:9])
    H -= skew(m.com) @ skew(m.com) * m.mass
    Hinv[0:3, 0:3] = numpy.linalg.inv(H)
    return Hinv
Example #9
    def computeKalmanGain(self):
        # Using Eq (5.15) on Pg 127
        P = self.EstimationErrorCovarianceMatrix
        H = self.StateToMeasurementMatrix
        R = self.MeasurementNoiseCovarianceMatrix
        KalmanGain = P @ np.transpose(H) @ np.linalg.inv(R)
        self.KalmanGain = KalmanGain
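The gain here is the information form K = P H^T R^-1, which is algebraically equivalent to the innovation form when P is the updated covariance; with the a-priori covariance the gain is usually computed as below (a standalone sketch, not this class's API):

import numpy as np

def kalman_gain(P_prior, H, R):
    # K = P- H^T (H P- H^T + R)^-1
    S = H @ P_prior @ H.T + R  # innovation covariance
    return P_prior @ H.T @ np.linalg.inv(S)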
Example #10
def solve(policy):
    """
    Given a policy this function evaluates it
    """
    sim = Simulator()
    pr = sim.get_transition_model()
    rew = sim.get_reward_model()

    dim = len(rew)

    pa0 = np.squeeze( pr[:, 0, :])
    pa1 = np.squeeze( pr[:, 1, :])

    prob = np.zeros((dim, dim))
    for i in range(dim):
        a = policy.select_action(i)[0]
        if a == 0:
            prob[i, :] = pa0[i, :]
        else:
            prob[i, :] = pa1[i, :]

    # policy evaluation: v = (I - gamma * P_pi)^-1 r
    v = np.linalg.inv(np.eye(dim) - policy.discount * prob).dot(rew)

    q1 = rew + policy.discount * pa0.dot(v)
    q2 = rew + policy.discount * pa1.dot(v)
    q = np.maximum(q1, q2)

    return v, q1, q2, q
Example #11
def cramer_rao(model, p0, X, noise, show_plot=False):
    """Calulate inverse of the Fisher information matrix for model
    sampled on grid X with parameters p0. Assumes samples are not
    correlated and have equal variance noise^2.

    Parameters
    ----------
    model : callable
        The model function, f(x, ...).  It must take the independent
        variable as the first argument and the parameters as separate
        remaining arguments.
    X : array
        Grid where model is sampled.
    p0 : M-length sequence
        Point in parameter space where Fisher information matrix is
        evaluated.
        """
    labels = ['a', 'b', 'c']  # parameter names the model is assumed to take
    p0dict = dict(zip(labels, p0))

    D = np.zeros((len(p0), X.size))
    for i, argname in enumerate(labels):
        D[i, :] = [
            derivative(lambda p: model(x, **dict(p0dict, **{argname: p})),
                       p0dict[argname],
                       dx=0.0001) for x in X
        ]

    I = 1 / noise**2 * np.einsum('mk,nk', D, D)
    iI = np.linalg.inv(I)
    return iI
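A sketch of a call (the snippet hard-codes three parameters named a, b, c and relies on scipy's derivative helper being imported in its module; the quadratic model here is hypothetical):

import numpy as np

def parabola(x, a=1.0, b=0.0, c=0.0):
    return a * x**2 + b * x + c

X = np.linspace(-3, 3, 100)
crb = cramer_rao(parabola, [2.0, 0.0, 8.0], X, noise=0.1)
# np.sqrt(np.diag(crb)) lower-bounds the std. dev. of unbiased estimates of a, b, c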
Example #12
    def sample_song(self, j):
        genres = self.genres[j]
        song = self.songs[j]
        sgc = self.song_genre_count[j]

        cdf = np.zeros(self.K)
        pdf = np.zeros(self.K)

        for i in range(len(song)):
            # Hold out this segment
            k_old = genres[i]
            self.timbre_totals[k_old] -= song[i]
            self.genre_count[k_old] -= 1
            sgc[k_old] -= 1

            # Compute p(k_ji | everything else)
            timbre_mu = self.genre_timbre_mean()
            pdf = (sgc + self.alpha) / (len(song) + self.K * self.alpha)
            (mu, sigma) = self.genre_timbre_posterior()
            # TODO: Improve the prior
            for k in range(self.K):
                dev = mu[k] - song[i]
                suf_stat = dev @ np.linalg.inv(sigma) @ dev
                pdf[k] *= (np.linalg.det(sigma)**-0.5) * np.exp(-suf_stat)

            # Sample new genre
            np.cumsum(pdf, out=cdf)
            k_new = bisect(cdf, np.random.random() * cdf[-1])

            self.timbre_totals[k_new] += song[i]
            self.genre_count[k_new] += 1
            sgc[k_new] += 1
Example #13
def gauss_pdf(X, M, S):

    if M.shape[1] == 1:

        DX = X - np.tile(M, X.shape[1])
        E = 0.5 * np.sum(DX * (np.dot(np.linalg.inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(
            np.linalg.det(S))
        P = np.exp(-E)

    elif X.shape[1] == 1:

        DX = np.tile(X, M.shape[1]) - M
        E = 0.5 * np.sum(DX * (np.dot(np.linalg.inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(
            np.linalg.det(S))
        P = np.exp(-E)

    else:

        DX = X - M
        E = 0.5 * np.dot(DX.T, np.dot(np.linalg.inv(S), DX))
        E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(
            np.linalg.det(S))
        P = np.exp(-E)

    return (P[0], E[0])
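A quick cross-check of the column-vector branch against scipy's reference implementation (a sketch):

import numpy as np
from scipy.stats import multivariate_normal

X = np.array([[0.5], [1.0]])  # a single 2-D point as a column
M = np.array([[0.0], [0.0]])  # mean as a column
S = np.array([[1.0, 0.2], [0.2, 2.0]])

P, E = gauss_pdf(X, M, S)
assert np.isclose(P, multivariate_normal(M.ravel(), S).pdf(X.ravel()))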
Example #14
def xyz2enu(u, v, w, head, pitch, roll, inverse=False):
    """Transforms velocities in XYZ coordinates to ENU, or vice versa if
    inverse=True. Transformation is done according to the Nortek
    convention.
    """
    # convert to radians
    hh = np.pi * (head - 90) / 180
    pp = np.pi * pitch / 180
    rr = np.pi * roll / 180

    ut = np.full(np.shape(u), np.nan)
    vt = np.full(np.shape(u), np.nan)
    wt = np.full(np.shape(u), np.nan)

    for i in range(len(head)):
        # generate heading matrix
        H = np.array([[np.cos(hh[i]), np.sin(hh[i]), 0],
                      [-np.sin(hh[i]), np.cos(hh[i]), 0],
                      [0, 0, 1]])
        # generate combined pitch and roll matrix
        P = np.array([[np.cos(pp[i]), -np.sin(pp[i]) * np.sin(rr[i]), -np.cos(rr[i]) * np.sin(pp[i])],
                      [0, np.cos(rr[i]), -np.sin(rr[i])],
                      [np.sin(pp[i]), np.sin(rr[i]) * np.cos(pp[i]), np.cos(pp[i]) * np.cos(rr[i])]])

        R = H @ P
        if inverse:
            R = np.linalg.inv(R)

        # do transformation
        ut[i] = R[0, 0] * u[i] + R[0, 1] * v[i] + R[0, 2] * w[i]
        vt[i] = R[1, 0] * u[i] + R[1, 1] * v[i] + R[1, 2] * w[i]
        wt[i] = R[2, 0] * u[i] + R[2, 1] * v[i] + R[2, 2] * w[i]

    return ut, vt, wt
Example #16
 def solveNormal(self):
     """Sets A, b, C, D, and e to the result of a normal autoregression"""
     n = len(self.trials[0][0])
     mean0 = np.zeros(n)
     mean1 = np.zeros(n)
     mean2 = np.zeros(n)
     cov0 = np.zeros((n, n))
     cov = np.zeros((2 * n, 2 * n))
     num = 0
     for trial in self.trials:
         mean0 += trial[0]
         for (xi, xn) in zip(trial[:-1], trial[1:]):
             mean1 += xi
             mean2 += xn
             num += 1
     mean0 /= len(self.trials)
     mean1 /= num
     mean2 /= num
     for trial in self.trials:
         cov0 += np.outer(trial[0] - mean0, trial[0] - mean0)
         for (xi, xn) in zip(trial[:-1], trial[1:]):
             xin = np.hstack([xi - mean1, xn - mean2])
             cov += np.outer(xin, xin)
     cov0 /= len(self.trials)
     cov /= num
     cov11 = cov[0:n, 0:n]
     cov12 = cov[0:n, n:2 * n]
     cov22 = cov[n:2 * n, n:2 * n]
     cov11inv = np.linalg.inv(cov11)
     Aall = np.dot(cov12.T, cov11inv)
     ball = mean2 - np.dot(Aall, mean1)
     #Do schur complement
     #[x[t]] = [A11 A12]*[x[t-1]]+[b1]
     #[y[t]]   [A21 A22] [y[t-1]] [b2]
     #A11^-1 (x[t]-b1-A12 y[t-1]) = x[t-1]
     #y[t] =  A21 x[t-1] + A22 y[t-1] + b2
     #     = A21 A11^-1 x[t] + (A22 - A21 A11^-1 A12) y[t-1] + A21 A11^-1 b2
     xind = self.observedindices
     yind = self.unobservedindices
     A11inv = np.linalg.inv(Aall[np.ix_(xind, xind)])
     self.A = np.dot(cov0[np.ix_(yind, xind)], np.linalg.inv(cov0[np.ix_(xind, xind)]))
     self.b = mean0[yind] - np.dot(self.A, mean0[xind])
     self.C = np.dot(Aall[np.ix_(yind, xind)], A11inv)
     self.D = Aall[np.ix_(yind, yind)]
     self.D -= np.dot(Aall[np.ix_(yind, xind)], np.dot(A11inv, Aall[np.ix_(xind, yind)]))
     self.e = ball[yind] - np.dot(self.C, ball[xind])
     return
Example #17
def ilc_gary(Freq, dbeta):  # ilc recipies used by Gary
    Spec_d = fv_d_pl(Freq, dbeta)
    Spec_y = fv_y(Freq)
    # Spec_y = array([-1.506, -1.037, -0.001, 2.253])
    # Spec_y = np.array([-4.031, -2.785, 0.187, 6.205]) / Tcmb

    Nf = len(Freq)
    Sigma = np.ones(Nf)
    Ns = 3
    if Nf < Ns:
        print("not enough freedom")
        sys.exit()

    # Populate the linear system to solve for the coefficients
    Matrix = np.zeros((Nf, Nf))
    Matrix[0] = Sigma
    Matrix[1] = Spec_d
    Matrix[2] = Spec_y
    RHS = np.zeros(Nf)
    RHS[2] = 1.

    # Soln = dot(pinv(Matrix), RHS)
    U, W, V = np.linalg.svd(Matrix)
    V = V.transpose()
    WW = np.zeros((Nf, Nf))
    for M in range(Nf):
        if (W[M] != 0.):
            WW[M, M] = 1. / W[M]
    Soln = np.dot(np.dot(np.dot(V, WW), U.transpose()), RHS)
    # Probe the degeneracy of the system
    Nd = Nf - Ns
    if (Nd > 0):
        Degenerate = np.where(W < 1e-12 * max(abs(W)))[0]
        if len(Degenerate) != Nd:
            print("Degenerate direction(s) not properly identified")
            sys.exit()

        Deg_Soln = V.T[Degenerate]

        # Minimize the degenerate solutions relative to the overall sensitivity
        if (Nd == 1):
            Alpha = -sum(Soln * Deg_Soln * (Sigma**2)) / sum(
                (Deg_Soln**2) * (Sigma**2))
            Soln1 = Soln + Alpha * Deg_Soln
        else:
            M_Alpha = np.zeros((Nd, Nd))
            RHS_Alpha = np.zeros(Nd)

            for i in range(Nd):
                RHS_Alpha[i] = -sum(Soln * Deg_Soln[i] * Sigma**2)
                for j in range(Nd):
                    M_Alpha[i, j] = sum(Deg_Soln[i] * Deg_Soln[j] * Sigma**2)

            Alpha = np.dot(np.linalg.inv(M_Alpha), RHS_Alpha)
            Soln_0 = Soln
            Soln = Soln_0 + np.dot(Deg_Soln.T, Alpha)
    xd100 = h * 100e9 / (k * Tcmb)
    Soln /= (xd100 / np.tanh(xd100 / 2.) - 4)
    return Soln
Example #18
def _analyze_results(opti_opts, bpe_results, jacobian, normalized=False):
    r"""Analyze the results."""
    # hard-coded values
    min_eig = 1e-14 # minimum allowed eigenvalue

    # alias the log level
    log_level = Logger().get_level()

    # get the names and number of parameters
    num_params    = len(bpe_results.param_names)

    # update the status
    if log_level >= 5:
        print('Analyzing final results.')
    if log_level >= 8:
        print('There were a total of {} function model evaluations.'.format(bpe_results.num_evals))

    # exit if nothing else to analyze
    if opti_opts.max_iters == 0:
        return

    # Compute values of un-normalized parameters.
    if normalized:
        param_typical = OptiParam.get_array(opti_opts.params, type_='typical')
        normalize_matrix  = np.diag(1 / param_typical)
    else:
        normalize_matrix  = np.eye(num_params)

    # Make information, covariance matrix, compute Singular Value Decomposition (SVD).
    try:
        # note, python has x = U*S*Vh instead of U*S*V', when V = Vh'
        (_, S_jacobian, Vh_jacobian) = np.linalg.svd(jacobian @ normalize_matrix, full_matrices=False)
        V_jacobian = Vh_jacobian.T
        temp = np.power(S_jacobian, -2, out=np.zeros(S_jacobian.shape), where=S_jacobian > min_eig)
        covariance = V_jacobian @ np.diag(temp) @ Vh_jacobian
    except MemoryError:
        if log_level >= 6:
            print('Singular value decomposition of Jacobian failed.')
        V_jacobian = np.nan * np.ones((num_params, num_params))
        covariance = np.linalg.inv(jacobian.T @ jacobian)

    param_one_sigmas = np.sqrt(np.diag(covariance))
    param_one_sigmas[param_one_sigmas < min_eig] = np.nan
    correlation    = covariance / (param_one_sigmas[:, np.newaxis] @ param_one_sigmas[np.newaxis, :])
    covariance[np.isnan(correlation)] = np.nan

    # Update SVD and covariance for the normalized parameters (but correlation remains as calculated above)
    if normalized:
        try:
            (_, S_jacobian, Vh_jacobian) = np.linalg.svd(jacobian, full_matrices=False)
            V_jacobian = Vh_jacobian.T
            covariance = V_jacobian @ np.diag(S_jacobian**-2) @ Vh_jacobian
        except MemoryError:
            pass # caught in earlier exception (hopefully?)

    # update the results
    bpe_results.correlation  = correlation
    bpe_results.info_svd     = V_jacobian.T
    bpe_results.covariance   = covariance
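When the Jacobian is well-conditioned, the SVD route above and the direct inverse in the MemoryError fallback agree, since J^T J = V S^2 V^T implies (J^T J)^-1 = V S^-2 V^T; a small check (sketch):

import numpy as np

J = np.random.default_rng(1).normal(size=(20, 3))
_, S, Vh = np.linalg.svd(J, full_matrices=False)
assert np.allclose(Vh.T @ np.diag(S**-2) @ Vh, np.linalg.inv(J.T @ J))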
Example #19
    def utuh(self, u, mlh):
        no = len(u)  # number of samples
        n = no - 1
        tmp = np.flipud(u)
        z = np.fft.ifft(np.vstack((tmp, u[1:n])))
        z = 2 * np.real(z[0:no])
        ul = np.linalg.inv(mlh) @ z
        return ul
Example #20
    def continuously_updated_gmm():

        # The weighting matrix is re-estimated at each candidate b itself
        W = lambda b: np.linalg.inv(self.Omegahat(b))

        bhat = self.minimize(lambda b: self.J(b, W(b)))

        return bhat
Example #21
    def inv_matrix(self) -> np.ndarray:
        """
        Inverse of lattice matrix.

        :return: Inverse of lattice matrix
        :rtype: np.ndarray
        """
        return np.linalg.inv(self.matrix)
Example #22
def twoPosition(P2x, P2y, theta1, theta2, thetaA, phiPm, betaA, Rho, ThetaBP,
                Gamma):
    p21 = math.sqrt((P2x)**2 + (P2y)**2)
    alpha2 = theta2 - theta1
    delta2 = math.atan2(P2y, P2x)

    chosenMatrix = [[
        math.cos(thetaA + betaA) - math.cos(thetaA),
        math.cos(phiPm + alpha2) - math.cos(phiPm)
    ],
                    [
                        math.sin(thetaA + betaA) - math.sin(thetaA),
                        math.sin(phiPm + alpha2) - math.sin(phiPm)
                    ]]
    knownMatrix = [p21 * math.cos(delta2), p21 * math.sin(delta2)]
    # Solve chosenMatrix @ [w, z] = knownMatrix for the unknown magnitudes
    unknown = np.linalg.solve(np.array(chosenMatrix), np.array(knownMatrix))

    wMagnitude = unknown[0]
    zMagnitude = unknown[1]

    P21 = v.Vector(p21, delta2)
    W1 = v.Vector(wMagnitude, thetaA)
    W2 = v.Vector(wMagnitude, thetaA + betaA)
    Z1 = v.Vector(zMagnitude, phiPm)
    Z2 = v.Vector(zMagnitude, phiPm + alpha2)

    chosenMatrix = [[
        math.cos(Rho + Gamma) - math.cos(Rho),
        math.cos(ThetaBP + alpha2) - math.cos(ThetaBP)
    ],
                    [
                        math.sin(Rho + Gamma) - math.sin(Rho),
                        math.sin(ThetaBP + alpha2) - math.sin(ThetaBP)
                    ]]
    knownMatrix = [p21 * math.cos(delta2), p21 * math.sin(delta2)]
    unknown = np.linalg.solve(np.array(chosenMatrix), np.array(knownMatrix))

    uMagnitude = unknown[0]
    sMagnitude = unknown[1]

    U1 = v.Vector(uMagnitude, Rho)
    U2 = v.Vector(uMagnitude, Rho + Gamma)
    S1 = v.Vector(sMagnitude, ThetaBP)
    S2 = v.Vector(sMagnitude, ThetaBP + alpha2)
    return P21, W1, W2, Z1, Z2, U1, U2, S1, S2
Example #23
def doSomething(task_num):
    #print("executing...", task_num)
    for i in range(100000):
        A = np.random.normal(0, 1, (1000, 1000))
        B = np.linalg.inv(A)

    return random.randint(1, 10) * random.randint(
        1, 500)  # real operation, used random to avoid caches and so on...
Example #24
def projection_gradient(f,
                        grad_f,
                        xs,
                        constraint_fs,
                        deriv_contrains,
                        ε1,
                        ε2=None,
                        MAX_ITERATIONS=10000):
    if ε2 is None:
        ε2 = ε1

    initial_constr_n = len(constraint_fs)
    start_xs = xs

    if not all(constr_f(xs) <= 0 for constr_f in constraint_fs):
        raise Exception('xStart not in domain')

    for it in range(MAX_ITERATIONS):
        grad_val = grad_f(xs)
        grad_matrix = np.array([grad_val]).transpose()
        border_vals = [constr_f(xs) for constr_f in constraint_fs]
        passive_constraints = {
            i
            for i, bv in enumerate(border_vals) if not (ε1 <= bv <= 0)
        }

        if len(passive_constraints) != initial_constr_n:
            matr = np.array([[f_i(xs) for f_i in row]
                             for i, row in enumerate(deriv_contrains)
                             if i not in passive_constraints])
            m = matr.transpose() @ np.linalg.inv(matr @ matr.transpose()) @ matr
            p = np.eye(len(m)) - m
            dx = (-p @ grad_matrix)[:, 0]
        else:
            dx = (-grad_matrix)[:, 0]

        distance_to_borders = []
        for i, constr_f in enumerate(constraint_fs):
            if i in passive_constraints:
                continue

            zero_val, _, check, _ = fsolve(
                lambda a: constr_f(start_xs + a * dx),
                np.eye(1),
                full_output=True,
            )
            if zero_val >= 0 and check == 1:
                distance_to_borders.append(zero_val[0])

        arg_min = optimize.minimize_scalar(lambda a: f(xs + a * dx)).x

        arg = min([arg_min] + distance_to_borders)
        dx *= arg
        xs += dx
        if np.linalg.norm(dx) < ε2:
            return xs, f(xs)

    print('Reached the end before convergence')
    return xs, f(xs)
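The matrix p built above is the orthogonal projector onto the null space of the active-constraint gradients, so projected steps keep first-order feasibility; a quick check of its defining properties (sketch):

import numpy as np

A = np.array([[1.0, 1.0, 0.0]])  # one active constraint gradient
Pm = np.eye(3) - A.T @ np.linalg.inv(A @ A.T) @ A
assert np.allclose(Pm @ Pm, Pm)                               # idempotent
assert np.allclose(A @ (Pm @ np.array([3.0, -1.0, 2.0])), 0)  # lands in the null space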
Example #25
def map_kpoints(other_kpoints, other_lattice, ref_lattice, ref_kpoints, ref_symrecs, has_timrev):
    """
    Build mapping between a list of k-points in reduced coordinates (`other_kpoints`)
    in the reciprocal lattice `other_lattice` and a list of reference k-points given
    in the reciprocal lattice `ref_lattice` with symmetry operations `ref_symrecs`.

    Args:
        other_kpoints:
        other_lattice: matrix whose rows are the reciprocal lattice vectors in cartesian coordinates.
        ref_lattice: same meaning as other_lattice.
        ref_kpoints:
        ref_symrecs: [nsym,3,3] arrays with symmetry operations in the `ref_lattice` reciprocal space.
        has_timrev: True if time-reversal can be used.

    Returns
        (o2r_map, nmissing)

        nmissing:
            Number of k-points in other_kpoints that cannot be mapped onto ref_kpoints.

        o2r_map[i] gives the mapping  between the i-th k-point in other_kpoints and
            ref_kpoints. Set to None if the i-th k-point does not have any image in ref.
            Each entry is a named tuple with the following attributes:

                ik_ref:
                tsign:
                isym
                g0

            kpt_other = TS kpt_ref + G0
    """
    ref_gprimd_inv = np.linalg.inv(np.asarray(ref_lattice).T)
    other_gprimd = np.asarray(other_lattice).T
    other_kpoints = np.asarray(other_kpoints).reshape((-1, 3))
    ref_kpoints = np.asarray(ref_kpoints).reshape((-1, 3))
    o2r_map = len(other_kpoints) * [None]

    tsigns = (1, -1) if has_timrev else (1,)
    kmap = collections.namedtuple("kmap", "ik_ref, tsign, isym, g0")

    for ik_oth, okpt in enumerate(other_kpoints):
        # Get other k-point in reduced coordinates in the reference lattice.
        okpt_red = np.matmul(ref_gprimd_inv, np.matmul(other_gprimd, okpt))

        # k_other = TS k_ref + G0
        found = False
        for ik_ref, kref in enumerate(ref_kpoints):
            if found: break
            for tsign in tsigns:
                for isym, symrec in enumerate(ref_symrecs):
                    krot = tsign * np.matmul(symrec, kref)
                    if issamek(okpt_red, krot):
                        g0 = np.rint(okpt_red - krot)
                        o2r_map[ik_oth] = kmap(ik_ref, tsign, isym, g0)
                        found = True
                        break

    return o2r_map, o2r_map.count(None)
Example #26
    def update(self, state, time, cov):
        self.health = 50
        (X_k, P_k) = prediction(time - self.lasttime)
        residual = state - np.dot(Obsticle.H, X_k)
        S_k = np.dot(Obsticle.H, np.dot(P_k, Obsticle.H.T)) + np.diag(
            [cov, cov, cov, cov])
        K_k = np.dot(P_k, np.dot(Obsticle.H.T, np.linalg.inv(S_k)))
        self.state = X_k + np.dot(K_k, residual)
        self.cov = P_k - np.dot(K_k, np.dot(S_k, K_k.T))
Example #27
def log_likelihood_basis(transition, p_basis, rate, lagtime):
    overlap = calc_overlap_basis(p_basis)
    rhs = calc_rhs_diffusionequation_basisfunctions(p_basis, rate)
    vals, vecs = np.linalg.eig(np.dot(np.linalg.inv(overlap), rhs))

    # lagtime: propagate each mode; axis=1 (sum over eigenmodes) is an assumption
    prop = np.sum(vecs * np.exp(lagtime * vals), axis=1)

    pass
Example #28
    def calculate_w(self, grad_h_x: np.array, s: np.array) -> np.array:
        # W = P*grad_h_x'*inv(S)
        w1 = np.linalg.inv(s)

        w2 = grad_h_x.transpose()
        w2 = np.matmul(self.p, w2)

        w = np.matmul(w2, w1)
        return w
Example #30
    def _computer_delta(self, X, Y, r=0.0):
        """
            compute the delta of each net, dcorr(X, Y) / dX

        PARAMETER:
            X, m * n1 matrix, is one kind of input data modality, and H1 is transposed matrix of X
                m is number of samples, which also means number of observations
                n1 is number of features in modality X, which also means number of variables
            Y, m * n2 matrix, is another kind of input data modality, and H2 is transposed matrix of Y
                m is number of samples, which also means number of observations
                n2 is number of features in modality Y, which also means number of variables
            r is regularized parameter
        References:
                G. Andrew, R. Arora, J. Bilmes, and K. Livescu. Deep canonical correlation analysis. In ICML, 2013.
        """
        m1, n1 = X.shape
        m2, n2 = Y.shape
        assert m1 == m2, 'DCCA computer delta: Input data need to contain same number of samples'

        X_mean = np.mean(X, axis=0)
        Y_mean = np.mean(Y, axis=0)
        H_1 = (X - X_mean).T  # H1 bar
        H_2 = (Y - Y_mean).T  # H2 bar

        sigma11 = (1 / (m1 - 1)) * H_1.dot(H_1.T) + r * np.eye(n1)
        sigma12 = (1 / (m1 - 1)) * H_1.dot(H_2.T)
        sigma21 = (1 / (m1 - 1)) * H_2.dot(H_1.T)
        sigma22 = (1 / (m1 - 1)) * H_2.dot(H_2.T) + r * np.eye(n2)

        assert np.linalg.det(sigma11) != 0, 'DCCA computer delta: sigma11 is singular, ' \
                                            'please use regularized parameter to adapt this matrix'
        assert np.linalg.det(sigma22) != 0, 'DCCA computer delta: sigma22 is singular, ' \
                                            'please use regularized parameter to adapt this matrix'
        T1 = np.linalg.inv(sqrtm(sigma11))  # inverse matrix square root (scipy.linalg.sqrtm)
        T3 = np.linalg.inv(sqrtm(sigma22))
        T = T1.dot(sigma12).dot(T3)  # T1 dot sigma12 dot T3
        U, D, V = np.linalg.svd(T)
        delta12 = functools.reduce(lambda x, y: np.dot(x, y), [T1, U, V, T3])
        delta11 = -0.5 * functools.reduce(lambda x, y: np.dot(x, y),
                                          [T1, U, np.diag(D), U.T, T1])

        dcorr = (1 / (m1 - 1)) * (2 * np.dot(delta11, H_1) +
                                  np.dot(delta12, H_2))
        return dcorr
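Note that T1 and T3 are inverse matrix square roots (scipy.linalg.sqrtm), not elementwise np.sqrt; a quick check of the whitening they perform (sketch):

import numpy as np
from scipy.linalg import sqrtm

S = np.array([[2.0, 0.5], [0.5, 1.0]])
T1 = np.linalg.inv(sqrtm(S))
assert np.allclose(T1 @ S @ T1, np.eye(2))  # S is whitened to the identity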
Example #31
def plante(ex, ey, ep, D, eq=None):
    Dshape = D.shape
    if Dshape[0] != 3:
        raise NameError('Wrong constitutive dimension in plante')

    if ep[0] == 1:
        return tri3e(ex, ey, D, ep[1], eq)
    else:
        Dinv = np.linalg.inv(D)
        return tri3e(ex, ey, Dinv, ep[1], eq)
Example #33
def get_mu(Pmax, Hkk, btmp):
    Rmu = np.sqrt(Pmax) / np.linalg.norm(Hkk)
    Lmu = 0
    N = Hkk.shape[0]
    Pcomp = np.matmul(
        np.matmul(Hkk.T, np.linalg.matrix_power(np.linalg.inv(btmp), 2)), Hkk)
    if (Pcomp < Pmax):
        return Lmu

    I = np.eye(N)
    while (Rmu - Lmu > 1e-1):
        midmu = (Rmu + Lmu) / 2
        Pcomp = np.matmul(
            np.matmul(Hkk.T, np.linalg.matrix_power(np.linalg.inv(btmp + midmu * I),
                                                    2)), Hkk)
        if (Pcomp < Pmax):
            Rmu = midmu
        else:
            Lmu = midmu
    return Lmu
Example #34
def gauss_pdf(x, m, s):
    """
    Gaussian probability density at point x, with mean m and covariance s.
    """
    if m.shape[1] == 1:
        dx = x - np.tile(m, x.shape[1])
        e = 0.5 * np.sum(dx * (np.dot(np.linalg.inv(s), dx)), axis=0)
        e = e + 0.5 * m.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(s))
        p = np.exp(-e)
    elif x.shape[1] == 1:
        dx = np.tile(x, m.shape[1]) - m
        e = 0.5 * np.sum(dx * (np.dot(np.linalg.inv(s), dx)), axis=0)
        e = e + 0.5 * m.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(s))
        p = np.exp(-e)
    else:
        dx = x - m
        e = 0.5 * np.dot(dx.T, np.dot(np.linalg.inv(s), dx))
        e = e + 0.5 * m.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(s))
        p = np.exp(-e)
    return (p[0], e[0])  # density value and its negative log
Example #35
def setup_asr_step_methods(m, vars, additional_stochs=[]):
    # groups RE stochastics that are suspected of being dependent
    groups = []
    fe_group = [n for n in vars.get('beta', []) if isinstance(n, mc.Stochastic)]
    ap_group = [n for n in vars.get('gamma', []) if isinstance(n, mc.Stochastic)]
    groups += [[g_i, g_j] for g_i, g_j in zip(ap_group[1:], ap_group[:-1])] + [fe_group, ap_group, fe_group+ap_group]

    for a in vars.get('hierarchy', []):
        group = []

        col_map = dict([[key, i] for i,key in enumerate(vars['U'].columns)])
        
        if a in vars['U']:
            for b in nx.shortest_path(vars['hierarchy'], 'all', a):
                if b in vars['U']:
                    n = vars['alpha'][col_map[b]]
                    if isinstance(n, mc.Stochastic):
                        group.append(n)
        groups.append(group)
        #if len(group) > 0:
            #group += ap_group
            #groups.append(group)
            #group += fe_group
            #groups.append(group)
                    
    for stoch in groups:
        if len(stoch) > 0 and np.all([isinstance(n, mc.Stochastic) for n in stoch]):
            # only step certain stochastics, for understanding convergence
            #if 'gamma_i' not in stoch[0].__name__:
            #    print 'no stepper for', stoch
            #    m.use_step_method(mc.NoStepper, stoch)
            #    continue

            #print 'finding Normal Approx for', [n.__name__ for n in stoch]
            if additional_stochs == []:
                vars_to_fit = [vars.get('p_obs'), vars.get('pi_sim'), vars.get('smooth_gamma'), vars.get('parent_similarity'),
                               vars.get('mu_sim'), vars.get('mu_age_derivative_potential'), vars.get('covariate_constraint')]
            else:
                vars_to_fit = additional_stochs

            try:
                raise ValueError  # NormApprox branch below is disabled; fall through to AdaptiveMetropolis
                na = mc.NormApprox(vars_to_fit + stoch)
                na.fit(method='fmin_powell', verbose=0)
                cov = np.array(np.linalg.inv(-na.hess), order='F')
                #print 'opt:', np.round_([n.value for n in stoch], 2)
                #print 'cov:\n', cov.round(4)
                if np.all(np.linalg.eigvals(cov) >= 0):
                    m.use_step_method(mc.AdaptiveMetropolis, stoch, cov=cov)
                else:
                    raise ValueError
            except ValueError:
                #print 'cov matrix is not positive semi-definite'
                m.use_step_method(mc.AdaptiveMetropolis, stoch)
Example #37
def apriltag_callback(data):
    # use apriltag pose detection to find where the robot is
    if len(data.detections) != 0:  # check if an apriltag is detected
        detection = data.detections[0]
        print(detection.pose)
        if detection.id == 21:  # tag id is the correct one
            print(detection)
            # Use the functions in helper.py to do the following
            # step 1. convert the pose to a poselist (pose data => detection.pose.pose)
            tf_apriltag_camera = pose2poselist(detection.pose.pose)
            # step 2. do the matrix manipulation
            tf_base_map = np.dot(tf_apriltag_map,
                                 np.linalg.inv(np.dot(tf_camera_base, tf_apriltag_map)))
Example #38
def measurement_update(sensor_var, p_cov_check, y_k, p_check, v_check, q_check):
    # 3.1 Compute Kalman Gain
    K_k = p_cov_check @ h_jac.T @ np.linalg.inv(h_jac @ p_cov_check @ h_jac.T + np.eye(3) * sensor_var)
    # 3.2 Compute error state
    delta_x = np.dot(K_k, (y_k - p_check))
    # 3.3 Correct predicted state
    p_hat = p_check + delta_x[0:3]
    v_hat = v_check + delta_x[3:6]
    q_hat = Quaternion(axis_angle=delta_x[6:9]).quat_mult_right(q_check)
    # 3.4 Compute corrected covariance
    p_cov_hat = (np.eye(9) - K_k@h_jac)@p_cov_check

    return p_hat, v_hat, q_hat, p_cov_hat
Example #39
    def calcCMat(self, callback=None, progressCallback=None):

        nSlopes = self.wfss[0].activeSubaps*2

        self.controlShape = (nSlopes, self.simConfig.totalActs)
        self.controlMatrix = numpy.zeros((nSlopes, self.simConfig.totalActs))
        acts = 0
        for dm in range(self.simConfig.nDM):
            dmIMat = self.dms[dm].iMat

            if dmIMat.shape[0]==dmIMat.shape[1]:
                dmCMat = numpy.linalg.inv(dmIMat)
            else:
                dmCMat = numpy.linalg.pinv(dmIMat, self.dmConds[dm])

            self.controlMatrix[:,acts:acts+self.dms[dm].acts] = dmCMat
            acts += self.dms[dm].acts
Example #40
def calculate_probablity(mean, alpha, covariance, data):
    clusters = [[] for i in range(3)]
    for point in data:
        probablities = []
        for i in range(len(alpha)):
            determinant_covariance = np.linalg.det(covariance[i])
            prior = 1 / (2 * math.pi * math.sqrt(determinant_covariance))
            diff = np.matrix(point - mean[i])
            quad = (diff * np.linalg.inv(np.matrix(covariance[i])) * diff.T).item()
            exponential_term = math.exp(-quad / 2)
            probablities.append(alpha[i] * prior * exponential_term)
        # assign the point to the cluster with the highest responsibility
        index = int(np.argmax(probablities))
        clusters[index].append(point)

    return clusters
Example #41
    def test_set_linear_system_solver(self):

        g = ODEFunctionGenerator(self.sys.eom_method.forcing_full,
                                 self.sys.coordinates,
                                 self.sys.speeds,
                                 self.sys.constants_symbols,
                                 mass_matrix=self.sys.eom_method.mass_matrix_full)

        assert g._solve_linear_system == np.linalg.solve

        g = ODEFunctionGenerator(self.sys.eom_method.forcing_full,
                                 self.sys.coordinates,
                                 self.sys.speeds,
                                 self.sys.constants_symbols,
                                 mass_matrix=self.sys.eom_method.mass_matrix_full,
                                 linear_sys_solver='numpy')

        assert g._solve_linear_system == np.linalg.solve

        g = ODEFunctionGenerator(self.sys.eom_method.forcing_full,
                                 self.sys.coordinates,
                                 self.sys.speeds,
                                 self.sys.constants_symbols,
                                 mass_matrix=self.sys.eom_method.mass_matrix_full,
                                 linear_sys_solver='scipy')

        assert g._solve_linear_system == sp.linalg.solve

        solver = lambda A, b: np.dot(np.linalg.inv(A), b)

        g = ODEFunctionGenerator(self.sys.eom_method.forcing_full,
                                 self.sys.coordinates,
                                 self.sys.speeds,
                                 self.sys.constants_symbols,
                                 mass_matrix=self.sys.eom_method.mass_matrix_full,
                                 linear_sys_solver=solver)

        assert g._solve_linear_system == solver
Example #42
def returnKernel(kernel, var=1):
  """
  Utility function to return the correct kernel function given a string
  identifying the name of the kernel
  """
  kernel = kernel.lower()
  if kernel in ['gaussian','normal','l2']:
    kern = lambda XX: np.exp(-pdist2(XX,'sqeuclidean')/var)
  elif kernel in ['laplace','laplacian','l1']:
    kern = lambda XX:  np.exp(-pdist2(XX,'minkowski',1)/var)
  elif kernel in ['dirac', 'delta','kronecker']:
    equals = lambda u,v: 1 - (np.array(u)==np.array(v)).all()
    kern = lambda XX: pdist2(XX,equals)
  elif kernel in ['mahalanobis']:
    VInv = np.linalg.inv(var)
    kern = lambda XX: np.exp(-np.power(pdist2(XX,'mahalanobis',VInv),2))
  elif kernel in ['beta']:
    dot = lambda XX: XX.dot(XX.T)
    kern = lambda XX: dot(np.matrix(phi_beta(XX,var[0],var[1])))
  elif kernel in ['beta_shifted','beta shifted']:
    dot = lambda XX: XX.dot(XX.T)
    kern = lambda XX: dot(np.matrix(phi_beta_shifted(XX,var[0],var[1])))
  return kern
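pdist2 is not defined in the snippet; my reading is that it wraps scipy's pairwise distances into a square matrix. A usage sketch under that assumption (the extra arguments needed by the minkowski/mahalanobis branches are elided in this hypothetical stand-in):

import numpy as np
from scipy.spatial.distance import pdist, squareform

def pdist2(XX, metric):
    # hypothetical helper: square matrix of pairwise distances
    return squareform(pdist(XX, metric))

X = np.random.default_rng(0).normal(size=(5, 2))
K = returnKernel('gaussian', var=2.0)(X)  # 5x5 Gaussian kernel matrix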
Example #43
def crossComputeKerns(X,Y,kernel,symmetric,var=1):
  """
  Compute pairwise kernel between points in two matrices
  Inputs:
     X: m x D matrix with m samples of dimension D
     kernel: (string) name of kernel being computed
     Y: n x D matrix with n samples of dimension D
     kernel: (string) name of kernel being computed
     k: (int) rank of model (number of hidden states)
     symmetric: whether the model has symmetric views or not (should be false
          for HMM)
     var: for gaussian kernel, the variance (sigma^2)
      for laplacian kernel, the bandwidth
      for mahalanobis kernel, the covariance matrix
      for delta kernel, None
  Outputs: tuple (K,L,G), where:
     K: cross-kernel matrix c of dimension m x n
  """
  kernel = kernel.lower()
  if kernel in ['gaussian','normal','l2']:
    kern = lambda XX,YY: np.exp(-cdist(XX,YY,'sqeuclidean')/var)
  elif kernel in ['laplace','laplacian','l1']:
    kern = lambda XX,YY:  np.exp(-cdist(XX,YY,'minkowski',1)/var)
  elif kernel in ['dirac', 'delta','kronecker']:
    equals = lambda u,v: 1 - (np.array(u)==np.array(v)).all()
    kern = lambda XX,YY: cdist(XX,YY,equals)
  elif kernel in ['mahalanobis']:
    VInv = np.linalg.inv(var)
    kern = lambda XX,YY: np.exp(-np.power(cdist(XX,YY,'mahalanobis',VInv),2))
  elif kernel in ['beta','betadot']:
    kern = lambda XX,YY: np.matrix(phi_beta(XX,var[0],var[1]).dot(
                                   phi_beta(YY,var[0],var[1]).T))
  elif kernel in ['beta_shifted','beta shifted']:
    kern = lambda XX,YY: np.matrix(phi_beta_shifted(XX,var[0],var[1]).dot(
                                   phi_beta_shifted(YY,var[0],var[1]).T))
  K = kern(X,Y)
  return K
Example #44
        A2[indices] = 0.
        A2[indices, indices] = 1.
    m = N-3
    #A2[2*dofs+m,:] = 0.
    #A2[2*dofs+m,2*dofs+m] = 1.
    #F[2*dofs+m] = 0.
    
    #W = sparse.csr_matrix(W)
    A2 = sparse.csr_matrix(A2)
    print("Time to update", time.time() - t1)
    print("Starting to solve...")
    t1 = time.time()
    p_old = np.copy(pressures)
    Dinv = sla.inv(sparse.csr_matrix(D))  # sparse inverse of the D block (assumed sparse-friendly)
    for k in range(0, Ns_it):  # SIMPLE ALGORITHM
        velocities = sla.spsolve(A2, F)
        pressures = sla.spsolve(B.T * Dinv * B, -B.T * velocities)
        velocities = velocities + Dinv * B * pressures
        #pressures = pressures  # in the SIMPLE algorithm, but not needed
        if np.linalg.norm(pressures - p_old) / np.linalg.norm(p_old) < eps:
            print("SIMPLE converged!")
            break
        p_old = np.copy(pressures)  # track the previous iterate for the convergence test
    print("Time to solve: ", time.time() - t1)
    counter += 1
    print("error :                    ", error)

################################
#       PLOTTING:
################################
# Making global X and Y coordinates

Example #45
def kalman_filter(y, A, C, Q, R, init_x, init_V, varargin={}):
    # Kalman filter.
    # [x, V, VV, loglik] = kalman_filter(y, A, C, Q, R, init_x, init_V, \)
    #
    # INPUTS:
    # y(:,t)   - the observation at time t
    # A - the system matrix
    # C - the observation matrix
    # Q - the system covariance
    # R - the observation covariance
    # init_x - the initial state (column) vector
    # init_V - the initial state covariance
    #
    # OPTIONAL INPUTS (string/value pairs [default in brackets])
    # 'model' - model(t)=m means use params from model m at time t [ones(1,T) ]
    #     In this case, all the above matrices take an additional final dimension,
    #     i.e., A(:,:,m), C(:,:,m), Q(:,:,m), R(:,:,m).
    #     However, init_x and init_V are independent of model(1).
    # 'u'     - u(:,t) the control signal at time t [ [] ]
    # 'B'     - B(:,:,m) the input regression matrix for model m
    #
    # OUTPUTS (where X is the hidden state being estimated)
    # x(:,t) = E[X(:,t) | y(:,1:t)]
    # V(:,:,t) = Cov[X(:,t) | y(:,1:t)]
    # VV(:,:,t) = Cov[X(:,t), X(:,t-1) | y(:,1:t)] t >= 2
    # loglik = sum{t=1}^T log P(y(:,t))
    #
    # If an input signal is specified, we also condition on it:
    # e.g., x(:,t) = E[X(:,t) | y(:,1:t), u(:, 1:t)]
    # If a model sequence is specified, we also condition on it:
    # e.g., x(:,t) = E[X(:,t) | y(:,1:t), u(:, 1:t), m(1:t)]

    assert type(y) == np.matrixlib.defmatrix.matrix
    (os, T) = y.shape
    ss = A.shape[0]
    # size of state space

    # set default params
    model = np.ones(T)
    u = np.array([])
    B = np.array([])
    ndx = np.array([])

    if varargin:
        model = varargin["model"]
        u = varargin["u"]
        B = varargin["B"]
        ndx = varargin.get("ndx", ndx)

    x = np.zeros((ss, T))
    V = np.zeros((ss, ss, T))
    VV = np.zeros((ss, ss, T))

    loglik = 0
    for t in range(0, T):
        if t == 0:
            # prevx = init_x(:,m);
            # prevV = init_V(:,:,m);
            prevx = init_x
            prevV = init_V
            initial = 1
        else:
            prevx = x[:, t - 1]
            prevV = V[:, :, t - 1]
            initial = 0

        if u.size == 0:
            assert type(y) == np.matrixlib.defmatrix.matrix
            (xr, Vr, LL, VVr) = kalman_update(A, C, Q, R, y[:, t], prevx, prevV, {"initial": initial})
            x[:, t] = xr
            V[:, :, t] = Vr
            VV[:, :, t] = VVr
        else:
            if ndx.size == 0:
                (x[:, t], V[:, :, t], LL, VV[:, :, t]) = kalman_update(
                    A, C, Q, R, y[:, t], prevx, prevV, {"initial": initial, "u": u[:, t], "B": B}
                )
            else:
                i = ndx[t]
                # copy over all elements; only some will get updated
                x[:, t] = prevx
                prevP = np.linalg.inv(prevV)
                prevPsmall = prevP[i, i]
                prevVsmall = np.linalg.inv(prevPsmall)
                (x[i, t], smallV, LL, VV[i, i, t]) = kalman_update(
                    A[i, i],
                    C[:, i],
                    Q[i, i],
                    R,
                    y[:, t],
                    prevx[i],
                    prevVsmall,
                    {"initial": initial, "u": u[:, t], "B": B[i, :]},
                )
                smallP = np.linalg.inv(smallV)
                prevP[i, i] = smallP
                V[:, :, t] = np.linalg.inv(prevP)

        loglik = loglik + LL[0, 0]

    return (x, V, VV, loglik)
Example #46
def internal_indices(features, orderings, distance="euclidean"):
    clusters={}
    indices=[]
    for i,x in enumerate(orderings):
        if x in clusters.keys():
            clusters[x].append(features[i])
        else:
            clusters[x]=[features[i]]
    if distance == "seuclidean":
        cmd = "dis.seuclidean(Q,R,variances)"
    elif distance == "mahalanobis":
        cmd = "dis.mahalanobis(Q,R,inv)"
    else:
        cmd = "dis."+distance+"(Q,R)"
    # 'A'
    centroids={}
    # 'B'
    avgdissim={}
    for i in clusters.keys():
        centroids[i] = np.mean(clusters[i], axis=0)
        sumdist = 0
        if distance == "seuclidean":
            variances = np.var(clusters[i], axis=0)
        if distance == "mahalanobis":
            inv = np.linalg.inv(np.cov(clusters[i]))
        for x in clusters[i]:
            sumdist += eval(cmd.replace('Q', 'x').replace('R', 'centroids[i]'))
        avgdissim[i] = sumdist / len(clusters[i])
    maxB = max(avgdissim.values())
    # 'D'
    dists={}
    for c,i in enumerate(clusters.keys()):
        # 'C'
        i_to_centroid=[]
        for j in np.delete(list(clusters.keys()), c):
            sumdist = 0
            if distance == "seuclidean":
                variances = np.var(clusters[j], axis=0)
            if distance == "mahalanobis":
                inv = np.linalg.inv(np.cov(clusters[j]))
            for x in clusters[i]:
                sumdist+=eval(cmd.replace('Q','x').replace('R','centroids[j]'))
            i_to_centroid.append(sumdist/len(clusters[i]))
            a,b=sorted([i,j])
            dists[str(a)+str(b)]=eval(cmd.replace('Q','centroids[i]').replace('R','centroids[j]'))
        a,b=avgdissim[i],min(i_to_centroid)
        # average Silhouette of cluster
        silhouette=(b-a)/max([a,b])
        # Davies-Bouldin coefficient
        #    the average over all clusters would be the davies bouldin index
        temp=dict(avgdissim)
        del temp[i]
        d, e = max(temp.items(), key=operator.itemgetter(1))
        a,b=sorted([i,d])
        dbc=(avgdissim[i]+e)/dists[str(a)+str(b)]
        # Dunn coefficient
        #     the minimum over all clusters would be the Dunn index
        temp=[s for s in dists.keys() if str(i) in s]
        temp={k: dists[k] for k in temp}
        di=min(temp.values())/maxB
        # add indices to the list
        a,b=min(temp.values()),max(temp.values())
        indices.append([silhouette,dbc,di,avgdissim[i],a,b])
    return indices
Example #47
def read_meas_info(source, tree=None):
    """Read the measurement info

    Parameters
    ----------
    source: string or file
        If string it is the file name otherwise it's the file descriptor.
        If tree is missing, the meas output argument is None

    tree: tree
        FIF tree structure

    Returns
    -------
    info: dict
       Info on dataset

    meas: dict
        Node in tree that contains the info.

    """
    if tree is None:
       fid, tree, _ = fiff_open(source)
       open_here = True
    else:
       fid = source
       open_here = False

    #   Find the desired blocks
    meas = dir_tree_find(tree, FIFF.FIFFB_MEAS)
    if len(meas) == 0:
        if open_here:
            fid.close()
        raise ValueError('Could not find measurement data')
    if len(meas) > 1:
        if open_here:
            fid.close()
        raise ValueError('Cannot read more than 1 measurement data')
    meas = meas[0]

    meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO)
    if len(meas_info) == 0:
        if open_here:
            fid.close()
        raise ValueError('Could not find measurement info')
    if len(meas_info) > 1:
        if open_here:
            fid.close()
        raise ValueError('Cannot read more than 1 measurement info')
    meas_info = meas_info[0]

    #   Read measurement info
    dev_head_t = None
    ctf_head_t = None
    meas_date = None
    highpass = None
    lowpass = None
    nchan = None
    sfreq = None
    chs = []
    p = 0
    for k in range(meas_info.nent):
        kind = meas_info.directory[k].kind
        pos  = meas_info.directory[k].pos
        if kind == FIFF.FIFF_NCHAN:
            tag = read_tag(fid, pos)
            nchan = int(tag.data)
        elif kind == FIFF.FIFF_SFREQ:
            tag = read_tag(fid, pos)
            sfreq = tag.data
        elif kind == FIFF.FIFF_CH_INFO:
            tag = read_tag(fid, pos)
            chs.append(tag.data)
            p += 1
        elif kind == FIFF.FIFF_LOWPASS:
            tag = read_tag(fid, pos)
            lowpass = tag.data
        elif kind == FIFF.FIFF_HIGHPASS:
            tag = read_tag(fid, pos)
            highpass = tag.data
        elif kind == FIFF.FIFF_MEAS_DATE:
            tag = read_tag(fid, pos)
            meas_date = tag.data
        elif kind == FIFF.FIFF_COORD_TRANS:
            tag = read_tag(fid, pos)
            cand = tag.data
            if cand['from_'] == FIFF.FIFFV_COORD_DEVICE and \
                                cand['to'] == FIFF.FIFFV_COORD_HEAD: # XXX : from
                dev_head_t = cand
            elif cand['from_'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
                                cand['to'] == FIFF.FIFFV_COORD_HEAD:
                ctf_head_t = cand

    # Check that we have everything we need
    if nchan is None:
        if open_here:
            fid.close()
        raise ValueError('Number of channels is not defined')

    if sfreq is None:
        if open_here:
            fid.close()
        raise ValueError('Sampling frequency is not defined')

    if len(chs) == 0:
        if open_here:
            fid.close()
        raise ValueError('Channel information not defined')

    if len(chs) != nchan:
        if open_here:
            fid.close()
        raise ValueError('Incorrect number of channel definitions found')

    if dev_head_t is None or ctf_head_t is None:
        hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT)
        if len(hpi_result) == 1:
            hpi_result = hpi_result[0]
            for k in range(hpi_result.nent):
                kind = hpi_result.directory[k].kind
                pos = hpi_result.directory[k].pos
                if kind == FIFF.FIFF_COORD_TRANS:
                    tag = read_tag(fid, pos)
                    cand = tag.data
                    if cand.from_ == FIFF.FIFFV_COORD_DEVICE and \
                                cand.to == FIFF.FIFFV_COORD_HEAD:  # XXX: from
                        dev_head_t = cand
                    elif cand.from_ == FIFF.FIFFV_MNE_COORD_CTF_HEAD and \
                                cand.to == FIFF.FIFFV_COORD_HEAD:
                        ctf_head_t = cand

    #   Locate the Polhemus data
    isotrak = dir_tree_find(meas_info,FIFF.FIFFB_ISOTRAK)
    if len(isotrak) == 0:
        if open_here:
            fid.close()
        raise ValueError('Isotrak not found')
    if len(isotrak) > 1:
        if open_here:
            fid.close()
        raise ValueError('Multiple Isotrak found')
    isotrak = isotrak[0]

    dig = []
    for k in range(isotrak.nent):
        kind = isotrak.directory[k].kind
        pos = isotrak.directory[k].pos
        if kind == FIFF.FIFF_DIG_POINT:
            tag = read_tag(fid, pos)
            dig.append(tag.data)
            dig[-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD

    #   Locate the acquisition information
    acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS)
    acq_pars = None
    acq_stim = None
    if len(acqpars) == 1:
        acqpars = acqpars[0]
        for k in range(acqpars.nent):
            kind = acqpars.directory[k].kind
            pos  = acqpars.directory[k].pos
            if kind == FIFF.FIFF_DACQ_PARS:
                tag = read_tag(fid, pos)
                acq_pars = tag.data
            elif kind == FIFF.FIFF_DACQ_STIM:
                tag = read_tag(fid, pos)
                acq_stim = tag.data

    #   Load the SSP data
    projs = read_proj(fid, meas_info)

    #   Load the CTF compensation data
    comps = read_ctf_comp(fid, meas_info, chs)

    #   Load the bad channel list
    bads = _read_bad_channels(fid, meas_info)

    #
    #   Put the data together
    #
    if tree.id is not None:
        info = dict(file_id=tree.id)
    else:
        info = dict(file_id=None)

    #  Make the most appropriate selection for the measurement id
    if meas_info.parent_id is None:
        if meas_info.id is None:
            if meas.id is None:
                if meas.parent_id is None:
                    info['meas_id'] = info['file_id']
                else:
                    info['meas_id'] = meas.parent_id
            else:
                info['meas_id'] = meas.id
        else:
            info['meas_id'] = meas_info.id
    else:
        info['meas_id'] = meas_info.parent_id

    if meas_date is None:
        info['meas_date'] = [info['meas_id']['secs'], info['meas_id']['usecs']]
    else:
        info['meas_date'] = meas_date

    info['nchan'] = nchan
    info['sfreq'] = sfreq
    info['highpass'] = highpass if highpass is not None else 0
    info['lowpass'] = lowpass if lowpass is not None else info['sfreq']/2.0

    #   Add the channel information and make a list of channel names
    #   for convenience
    info['chs'] = chs
    info['ch_names'] = [ch.ch_name for ch in chs]

    #
    #  Add the coordinate transformations
    #
    info['dev_head_t'] = dev_head_t
    info['ctf_head_t'] = ctf_head_t
    if dev_head_t is not None and ctf_head_t is not None:
        # Compose device -> CTF head: go device -> head, then undo ctf -> head
        info['dev_ctf_t'] = dict(dev_head_t)  # copy so dev_head_t is not mutated
        info['dev_ctf_t']['to'] = info['ctf_head_t']['from_'] # XXX : see if better name
        info['dev_ctf_t']['trans'] = np.dot(np.linalg.inv(ctf_head_t['trans']),
                                            info['dev_ctf_t']['trans'])
    else:
        info['dev_ctf_t'] = []

    #   All kinds of auxiliary stuff
    info['dig'] = dig
    info['bads'] = bads
    info['projs'] = projs
    info['comps'] = comps
    info['acq_pars'] = acq_pars
    info['acq_stim'] = acq_stim

    if open_here:
        fid.close()

    return info, meas
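
# A minimal standalone sketch (hypothetical 4x4 homogeneous transforms, not
# values from a real FIF file) of the composition used for dev_ctf_t above:
# map device -> head, then undo ctf -> head via the inverse transform.
import numpy as np

dev_head = np.eye(4)
dev_head[:3, 3] = [0.0, 0.0, 0.04]   # device origin 4 cm below the head origin
ctf_head = np.eye(4)
ctf_head[:3, 3] = [0.0, 0.02, 0.0]   # CTF head frame shifted 2 cm along y

dev_ctf = np.dot(np.linalg.inv(ctf_head), dev_head)
print(np.dot(dev_ctf, [0.0, 0.0, 0.0, 1.0]))   # -> [ 0.   -0.02  0.04  1.  ]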
Example #48
0
import numpy as np

def _getOrig(P, M, u=1, v=0):
    # Unproject pixel (u, v): pull the near-plane point back through P, then M
    orig0 = np.dot(np.linalg.inv(P), [u, v, -1, 1])
    orig0 = np.dot(np.linalg.inv(M), orig0)
    orig0 = orig0 / orig0[-1]
    return orig0
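
# Hedged usage sketch: with identity projection (P) and model-view (M)
# matrices (made-up values, just to exercise the code path), the returned
# ray origin is the w-divided near-plane point.
P = np.eye(4)
M = np.eye(4)
print(_getOrig(P, M, u=0, v=0))   # -> [ 0.  0. -1.  1.]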
Example #49
0
import numpy as np

def rollo_ekf(fstate, x_pp, u, Jfunc, Jh, E_pp, hmeas, z, Q, R):
    """Calculates the final position according to the odometry model of Rollo."""

#function [x_cc, E_cc] = rollo_ekf(fstate, x_pp, u, Jfunc, Jh, E_pp, hmeas, z, Q, R)

#%% PREDICTION UPDATE
# Nonlinear update and linearization at current state
    x_cp = fstate(x_pp, u)  # state prediction, x_k|k-1
    Jf = Jfunc(x_pp, u)     # Jacobian of f at the previous estimate

# Partial covariance update
    E_cp = Jf @ E_pp @ Jf.T + Q  # E_k|k-1

#%% INNOVATION UPDATE
# Nonlinear measurement and linearization
    z_estimate = hmeas(x_cp)

    P12 = E_cp @ Jh.T                    # Cross covariance
    S_inv = np.linalg.inv(Jh @ P12 + R)  # Inverse innovation covariance
    H = P12 @ S_inv                      # Kalman filter gain, H_k

    x_cc = x_cp + H @ (z - z_estimate)   # State estimate, x_k|k
    E_cc = E_cp - H @ P12.T              # State covariance matrix, E_k|k

#    if degrees:
#        Theta_i = Theta_i / 180.0 * np.pi
#    S_L = t * (n_L / 60.0) * 2 * np.pi * r_L # Linear distance traveled by left wheel in meters
#    S_R = t * (n_R / 60.0) * 2 * np.pi * r_R # Linear distance traveled by right wheel in meters
#    if verbose:
#        print('Linear distance traveled by left wheel (S_L [m]): ', S_L)
#        print('Linear distance traveled by right wheel (S_R [m]): ', S_R)
#    if (S_R > S_L):
##    if (np.abs(S_R - S_L) > EPS):
#        r  = (axle_l / 2.0) * ((S_L + S_R) / (S_R - S_L)) # travel_radius
#        beta = S_R / (r + axle_l / 2.0) # travel_angle
#        P_f_x = P_i_x - r * (1 - np.cos(beta))
#        P_f_y = P_i_y + r * np.sin(beta)
#        Theta_f = beta + Theta_i;
#    elif (S_R < S_L):
##    elif (np.abs(S_R - S_L) < -EPS):
#        r = (axle_l / 2.0) * ((S_L + S_R) / (S_L - S_R)) # travel_radius
#        beta = S_R / (r - axle_l / 2.0) # travel_angle
#        P_f_x = P_i_x + r * (1 - np.cos(beta))
#        P_f_y = P_i_y + r * np.sin(beta)
#        Theta_f = Theta_i - beta
#    elif (S_R == S_L):
##    elif (np.abs(S_R - S_L) < EPS):
#        r = 0
#        beta = 0
#        P_f_x = P_i_x + S_L * np.cos(Theta_i)
#        P_f_y = P_i_y + S_L * np.sin(Theta_i)
#        Theta_f = Theta_i
#    else:
#        print('Error')
#        exit(1);
#    if degrees:
#        Theta_f = Theta_f * 180 / np.pi
#    if verbose:
#        print('Travel radius [m]: ', r)
#        if degrees:
#            print('Travel angle [°]:', beta)
#        else:
#            print('Travel angle [rad]:', beta)
    return [x_cc, E_cc]
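
# Hedged usage sketch on a toy constant-velocity model (hypothetical
# matrices, not Rollo's actual odometry model or measurement setup).
F = np.array([[1.0, 1.0], [0.0, 1.0]])   # state transition (position, velocity)
Hm = np.array([[1.0, 0.0]])              # observe position only

x_cc, E_cc = rollo_ekf(fstate=lambda x, u: F @ x,
                       x_pp=np.array([0.0, 1.0]),
                       u=None,
                       Jfunc=lambda x, u: F,
                       Jh=Hm,
                       E_pp=np.eye(2),
                       hmeas=lambda x: Hm @ x,
                       z=np.array([1.2]),
                       Q=0.01 * np.eye(2),
                       R=np.array([[0.1]]))
print(x_cc)   # position nudged toward the measurement 1.2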
Example #50
0
import numpy as np

alist = list(map(int, input().split()))  # read a row of integers (unused below)
a = np.array([[1, 1, 1], [4, 2, 1], [9, 3, 1], [16, 4, 1]])
# a is 4x3, hence not invertible; use the Moore-Penrose pseudo-inverse
ainv = np.linalg.pinv(a)
print(ainv)
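
# Sanity check (a defining property of the pseudo-inverse, not part of the
# original snippet): a @ pinv(a) @ a reproduces a.
print(np.allclose(a @ ainv @ a, a))   # -> True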
Example #51
0
    def calculate(self, fragments, cupdate=0.05):

        nFragBasis = 0
        nFragAlpha = 0
        nFragBeta = 0
        self.fonames = []

        unrestricted = ( len(self.data.mocoeffs) == 2 )

        self.logger.info("Creating attribute fonames[]")

        # Collect basis info on the fragments.
        for j in range(len(fragments)):
            nFragBasis += fragments[j].nbasis
            nFragAlpha += fragments[j].homos[0] + 1
            if unrestricted and len(fragments[j].homos) == 1:
                nFragBeta += fragments[j].homos[0] + 1 #assume restricted fragment
            elif unrestricted and len(fragments[j].homos) == 2:
                nFragBeta += fragments[j].homos[1] + 1 #assume unrestricted fragment

            #assign fonames based on fragment name and MO number
            for i in range(fragments[j].nbasis):
                if hasattr(fragments[j],"name"):
                    self.fonames.append("%s_%i"%(fragments[j].name,i+1))
                else:
                    self.fonames.append("noname%i_%i"%(j,i+1))

        nBasis = self.data.nbasis
        nAlpha = self.data.homos[0] + 1
        if unrestricted:
            nBeta = self.data.homos[1] + 1

        # Check to make sure calcs have the right properties.
        if nBasis != nFragBasis:
            self.logger.error("Basis functions don't match")
            return False

        if nAlpha != nFragAlpha:
            self.logger.error("Alpha electrons don't match")
            return False

        if unrestricted and nBeta != nFragBeta:
            self.logger.error("Beta electrons don't match")
            return False

        if len(self.data.atomcoords) != 1:
            self.logger.warning("Molecule calc appears to be an optimization")

        for frag in fragments:
            if len(frag.atomcoords) != 1:
                msg = "One or more fragment appears to be an optimization"
                self.logger.warning(msg)
                break

        last = 0
        for frag in fragments:
            size = frag.natom
            if self.data.atomcoords[0][last:last+size].tolist() != \
                    frag.atomcoords[0].tolist():
                self.logger.error("Atom coordinates aren't aligned")
                return False
            if self.data.atomnos[last:last+size].tolist() != \
                    frag.atomnos.tolist():
                self.logger.error("Elements don't match")
                return False

            last += size

        # And let's begin!
        self.mocoeffs = []
        self.logger.info("Creating mocoeffs in new fragment MO basis: mocoeffs[]")

        for spin in range(len(self.data.mocoeffs)):
            blockMatrix = numpy.zeros((nBasis,nBasis), "d")
            pos = 0

            # Build up block-diagonal matrix from fragment mocoeffs.
            # Need to switch ordering from [mo,ao] to [ao,mo].
            for i in range(len(fragments)):
                size = fragments[i].nbasis
                if len(fragments[i].mocoeffs) == 1:
                    temp = numpy.transpose(fragments[i].mocoeffs[0])
                    blockMatrix[pos:pos+size, pos:pos+size] = temp
                else:
                    temp = numpy.transpose(fragments[i].mocoeffs[spin])
                    blockMatrix[pos:pos+size, pos:pos+size] = temp
                pos += size

            # Invert and multiply to express results in the fragment MO basis.
            iBlockMatrix = numpy.linalg.inv(blockMatrix)
            temp = numpy.transpose(self.data.mocoeffs[spin])
            results = numpy.transpose(numpy.dot(iBlockMatrix, temp))

            self.mocoeffs.append(results)

            if hasattr(self.data, "aooverlaps"):
                tempMatrix = numpy.dot(self.data.aooverlaps, blockMatrix)
                tBlockMatrix = numpy.transpose(blockMatrix)
                if spin == 0:
                    self.fooverlaps = numpy.dot(tBlockMatrix, tempMatrix)
                    self.logger.info("Creating fooverlaps: array[x,y]")
                elif spin == 1:
                    self.fooverlaps2 = numpy.dot(tBlockMatrix, tempMatrix)
                    self.logger.info("Creating fooverlaps (beta): array[x,y]")
            else:
                self.logger.warning("Overlap matrix missing")

        self.parsed = True
        self.nbasis = nBasis
        self.homos = self.data.homos

        return True
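
# A minimal sketch of the block-diagonal construction used in calculate(),
# with made-up 2x2 fragment MO coefficient blocks; scipy.linalg.block_diag
# is a convenient alternative to filling the matrix slice by slice.
import numpy
from scipy.linalg import block_diag

frag1 = numpy.array([[0.9, 0.1], [0.1, -0.9]])   # fragment 1 mocoeffs, [mo, ao]
frag2 = numpy.array([[1.0, 0.0], [0.0, 1.0]])    # fragment 2 mocoeffs, [mo, ao]
blockMatrix = block_diag(frag1.T, frag2.T)       # transpose to [ao, mo] ordering
print(numpy.linalg.inv(blockMatrix).shape)       # -> (4, 4)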
Example #52
0
    def update(self):
        next_action = self.pdcontroller.get_next_action(self.state, pf_vec)
        FSFTsigx = self.F.dot(self.sig0).dot(self.F_trans) + self.sigx
        HTBlahInverse = npy.linalg.inv(self.H.dot(FSFTsigx).dot(self.H_trans) + self.sigz)
        kalman_gain = FSFTsigx.dot(self.H_trans).dot(HTBlahInverse)
Example #53
0
import numpy as np

def linclosed(TrainingData, y):
    # Closed-form least squares (normal equations): theta = (X'X)^-1 X'y
    XtX = np.dot(TrainingData.T, TrainingData)
    theta = np.dot(np.linalg.inv(XtX), np.dot(TrainingData.T, y))
    return theta
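
# Hedged usage sketch: recover slope and intercept of y = 2x + 1 from
# noiseless made-up data (a column of ones is appended for the intercept).
X = np.array([[1.0, 1.0], [2.0, 1.0], [3.0, 1.0]])
y = np.array([3.0, 5.0, 7.0])
print(linclosed(X, y))   # -> [2. 1.]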
Example #54
0
import numpy as np

def disclyap(a1, b1, vecflag=False):
# disclyap -- extension of Hansen/Sargent's DOUBLEJ.M
#
#  function V = disclyap(a1,b1,vecflag)
#  Computes infinite sum V given by
#
#         V = SUM (a1^j)*b1*(a1^j)'
#
#  where a1 and b1 are each (n X n) matrices with eigenvalues whose moduli are
#  bounded by unity, and b1 is an (n X n) matrix.
#  The sum goes from j = 0 to j = infinity.  V is computed by using
#  the following "doubling algorithm".  We iterate to convergence on
#  V(j) on the following recursions for j = 1, 2, ..., starting from
#  V(0) = b1:
#
#       a1(j) = a1(j-1)*a1(j-1)
#       V(j) = V(j-1) + a1(j-1)*V(j-1)*a1(j-1)'
#
#  The limiting value is returned in V.
#
# -----------------------------------------------------------------------------
# EMT added following comments and the vecflag argument (default=false)
# -----------------------------------------------------------------------------
#
# 1) L&S p. 76, write
# X = doublej(A, B * B') solves X = A * X * A' + B * B' where a1 = A and B*B' = b1
#
# 2) Note on algorithm
# * let $V_k \equiv \sum_{j=0}^K A^j C'C A^j'$
# * then $V_{2k} = V_k + A^k V_k A^k'$
# * let $k=2^j$ and rewrite $V_k \equiv V_j$ and $A_j\equiv A^{2^j}$ so that $A_{j+1} = A_j^2$
# * then $V_{j+1} = V_j + A_j^2 V_j A_j^2'$
#
# the doubling uses fewer iterations (namely only $j$ instead of $2^j$ and is thus much faster
#
# 3) Solves "discrete Lyapunov" equation (but can also handle constants in the SS)
#
# 4) new parameter: if vecflag == true: apply vec formula of Hamilton instead of doubling
#    will assume invertibility, i.e. no constant in SS

  if vecflag:
    Np       = np.size(a1, 0)
    Inp2     = np.eye(Np**2)
    # vec(V) = (I - a1 kron a1)^-1 vec(b1), Hamilton's vec formula
    vecSIGMA = np.dot(np.linalg.inv(Inp2 - np.kron(a1, a1)), b1.flatten())
    V        = np.reshape(vecSIGMA, (Np, Np))
  else:
    alpha0 = a1
    gamma0 = b1
    delta  = 5
    ijk    = 1
    while delta > np.finfo(float).eps:
      alpha1 = np.dot(alpha0, alpha0)
      gamma1 = gamma0 + np.dot(np.dot(alpha0, gamma0), alpha0.T)
      delta  = np.max(np.abs(gamma1 - gamma0))
      gamma0 = gamma1
      alpha0 = alpha1
      ijk    = ijk + 1
      if ijk > 50:
        print('Error: ijk = %d, delta = %f check aopt and c for proper configuration' % (ijk, delta))
        break
    V = gamma1
  return V
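
# Hedged sanity check with toy matrices: for stable a1, the returned V
# should satisfy the discrete Lyapunov equation V = a1 V a1' + b1.
a1_demo = np.array([[0.5, 0.1], [0.0, 0.3]])
b1_demo = np.eye(2)
V_demo = disclyap(a1_demo, b1_demo)
print(np.allclose(V_demo, a1_demo @ V_demo @ a1_demo.T + b1_demo))   # -> True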
Example #55
0
import numpy as np

def inverse(hesse):
    # Matrix inverse for arrays, scalar reciprocal otherwise
    if isinstance(hesse, np.ndarray):
        return np.linalg.inv(hesse)
    else:
        return 1. / hesse
Example #56
0
        W11 = W[np.ix_(W11_indices, W11_indices)]  # (p-1)x(p-1) block, not the diagonal
        s12 = W[col, W11_indices]
        # solve using coordinate descent: according to Friedman 2007
        beta = sl.linear_model.lasso_path(W11, s12, rho)
        # get the last column
        beta = beta[:, -1]

        W[col, W11_indices] = np.dot(W11, beta)
        W[W11_indices, col] = np.transpose(W[col, W11_indices])
    # test stop criterion
    if np.linalg.norm(W_old - W) < stop_criterion:
        break
    W_old = W.copy()  # copy, or W_old would alias W and the criterion always fires

# final result:
Theta = np.linalg.inv(W)
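
# For comparison, scikit-learn ships a maintained implementation of the same
# Friedman et al. (2007) estimator; a hedged sketch on random data (the
# alpha value and shapes here are made up for illustration).
import numpy as np
from sklearn.covariance import GraphicalLasso

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
model = GraphicalLasso(alpha=0.1).fit(X)
print(model.precision_.shape)   # -> (5, 5), the estimated Theta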
Example #57
0
import numpy as np

def d(g, G, A):
    """Calculates the statistic g' Ginv A (A' Ginv A)^-1 A' Ginv g,
    where Ginv = (G'G)^-1."""
    G_inv = np.linalg.inv(G.T @ G)
    d = g.T @ G_inv @ A @ np.linalg.inv(A.T @ G_inv @ A) @ A.T @ G_inv @ g
    return d
def label_propagation(G, Label_Matrix, aff_mat, label_mapping, num_changed, 
					  repeat, alpha, eps, max_iters, tol):
	"""Performs iterative weighted average algorithm to propagate labels to unlabeled nodes.
	Features: Hard label clamps, deterministic solution.
	See: Zhu and Ghahramani, 2002.
	"""
	restore = max_iters
	n = aff_mat.shape[0]

	# Construct Diagonal Degree Matrix
	diag_degree = compute_diagonal_degree_matrix(aff_mat, inverse=True)
	
	# Variables necessary for the algorithm:
	mu = alpha / (1 - alpha + eps)
	A = sparse.lil_matrix((n, n))
	A = A.tocsr()
	# Diagonal of A is 1 / (1 + mu*d_i + mu*eps); its inverse is used below
	A.setdiag(1 / (1 + mu*diag_degree + mu*eps))

	W = W.tocsr()
	if not no_inverse:
		ddmi.setdiag(1 / W.sum(axis=1))
	else:
		ddmi.setdiag(W.sum(axis=1))
	ddmi = ddmi.tocsr()

	# Allow for the option to iteratively do the algorithm:
	# i.e. it will rerun if the number of unlabeled_nodes exceeds a threshold
	threshold_unlabeled = 100
	number_unlabeled = num_changed

	while (number_unlabeled > threshold_unlabeled) and (repeat == 1):
	# In one vs. all fashion, iteratively process the weighted average of neighbors.	
		column_index = 0
		for column in Label_Matrix.T:	
			mat_column = np.mat(column).T
			labeled_indices = np.nonzero(mat_column != 0)[0]
			num_members = (np.nonzero(mat_column == 1))[0].shape[1]
			print('Initial number of members in class {0}: {1}'.format(column_index, num_members))

			Y_hat_now = mat_column
			Y_hat_next = sparse.diags(1 / A.diagonal()) * (mu * W * Y_hat_now + column)  # inverse of the diagonal A

			while not_converged(Y_hat_next, Y_hat_now, tol) and max_iters > 0:
				Y_hat_now = Y_hat_next
				Y_hat_next = diag_degree * aff_mat * Y_hat_now  # diag_degree already holds the inverse degrees
				Y_hat_next[labeled_indices] = mat_column[labeled_indices]
				max_iters -= 1

			class_members = np.nonzero(Y_hat_next>0)[0]
			print('Current number of members in class {0}: {1}'.format(column_index, class_members.shape[1]))

			Label_Matrix[class_members, :] = -1
			Label_Matrix[class_members, column_index] = 1

			column_index += 1
			max_iters = restore

		# Account for the case where an unlabeled node does not get labeled to any class:
		number_unlabeled = 0
		for line in Label_Matrix:
			if 0 in line:
				number_unlabeled += 1 
		print "number of nodes still unlabeled:", number_unlabeled
		repeat = 0

	Graph_Final = label_nodes(G, Label_Matrix, label_mapping)

	return Graph_Final
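
# A minimal dense sketch of the Zhu & Ghahramani (2002) iteration that the
# docstring describes (Y <- D^-1 W Y with hard clamping of labeled rows),
# on a hypothetical 4-node chain graph, not the sparse production code above.
import numpy as np

W = np.array([[0., 2., 0., 0.],
              [2., 0., 1., 0.],
              [0., 1., 0., 2.],
              [0., 0., 2., 0.]])          # edge affinities
Y = np.zeros((4, 2))
Y[0, 0] = 1.0                             # node 0 labeled class 0
Y[3, 1] = 1.0                             # node 3 labeled class 1
clamp = Y.copy()
labeled = [0, 3]
D_inv = np.diag(1.0 / W.sum(axis=1))

for _ in range(200):
    Y = D_inv @ W @ Y                     # propagate one step
    Y[labeled] = clamp[labeled]           # hard label clamp

print(Y.argmax(axis=1))                   # -> [0 0 1 1]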