Example #1
    def robot_arm_to_target(AA, BB):
        m, n = AA.shape
        n = n // 4

        A = np.zeros((9 * n, 9))
        b = np.zeros((9 * n, 1))
        for i in range(1, n + 1):
            Ra = AA[0:3, 4 * i - 4:4 * i - 1]
            Rb = BB[0:3, 4 * i - 4:4 * i - 1]
            A[9 * i - 9:9 * i, :] = np.kron(Ra, np.eye(3)) + np.kron(-np.eye(3), Rb.T)

        u, s, vh = np.linalg.svd(A, full_matrices=True)
        x = vh[-1, :]  # right singular vector of the smallest singular value
        R = np.reshape(x[0:9], (3, 3))  # numpy's C-order reshape already yields the rotation candidate
        R = np.sign(np.linalg.det(R)) / np.abs(np.linalg.det(R)) ** (1 / 3) * R
        u, s, vh = np.linalg.svd(R, full_matrices=True)
        R = u @ vh
        if np.linalg.det(R) < 0:
            R = u @ np.diag([1, 1, -1]) @ vh
        C = np.zeros((3 * n, 3))
        d = np.zeros((3 * n, 1))
        I = np.eye(3)
        for i in range(1, n + 1):
            C[3 * i - 3:3 * i, :] = I - AA[0:3, 4 * i - 4:4 * i - 1]
            d[3 * i - 3:3 * i, 0] = AA[0:3, 4 * i - 1] - R @ BB[0:3, 4 * i - 1]
        t = np.linalg.lstsq(C, d, rcond=None)[0]  # least-squares solve (MATLAB's C \ d)
        X = np.eye(4)
        X[0:3, 0:3] = R
        X[0:3, 3] = t.ravel()

        return X
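The harness below is an addition (not part of the listed snippet): it fabricates a ground-truth hand-eye transform X, builds consistent pairs with B_i = X^(-1) A_i X stacked horizontally the way robot_arm_to_target expects, and checks the estimate on clean synthetic data.

import numpy as np

def random_pose(rng):
    # Random rigid transform: QR-based rotation plus a random translation.
    q, _ = np.linalg.qr(rng.standard_normal((3, 3)))
    if np.linalg.det(q) < 0:
        q[:, 0] = -q[:, 0]
    T = np.eye(4)
    T[:3, :3] = q
    T[:3, 3] = rng.standard_normal(3)
    return T

rng = np.random.default_rng(0)
X_true = random_pose(rng)
X_inv = np.linalg.inv(X_true)

n_pairs = 10
A_list = [random_pose(rng) for _ in range(n_pairs)]
AA = np.hstack(A_list)
BB = np.hstack([X_inv @ A @ X_true for A in A_list])   # A X = X B by construction

X_est = robot_arm_to_target(AA, BB)
print(np.allclose(X_est, X_true, atol=1e-6))           # expected: True on noise-free data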
Example #2
def ralign(X, Y):
    """Rigid alignment of two sets of points in k-dimensional Euclidean space.
    Given two sets of points in
    correspondence, this function computes the scaling,
    rotation, and translation that define the transform TR
    that minimizes the sum of squared errors between TR(X)
    and its corresponding points in Y.  This routine takes
    O(n k^3)-time.

    Inputs:
        X - a k x n matrix whose columns are points
        Y - a k x n matrix whose columns are points that correspond to
           the points in X
    Outputs:
        R, c, t - the rotation matrix, scaling, and translation vector defining
        the linear map TR as TR(x) = c * R * x + t
        such that the average norm of TR(X(:, i)) - Y(:, i) is minimized.
    """
    m, n = X.shape

    mx = X.mean(1)
    my = Y.mean(1)
    Xc = X - np.tile(mx, (n, 1)).T
    Yc = Y - np.tile(my, (n, 1)).T

    sx = np.mean(np.sum(Xc * Xc, 0))
    Sxy = np.dot(Yc, Xc.T) / n

    U, D, V = np.linalg.svd(Sxy, full_matrices=True, compute_uv=True)
    V = V.T.copy()
    # print U,"\n\n",D,"\n\n",V
    r = np.linalg.matrix_rank(Sxy)
    S = np.eye(m)
    if r > (m - 1):
        if np.linalg.det(Sxy) < 0:
            S[m - 1, m - 1] = -1
    elif r == m - 1:
        if np.linalg.det(U) * np.linalg.det(V) < 0:
            S[m - 1, m - 1] = -1
    else:
        # Degenerate point configuration: the transform is not uniquely determined.
        R = np.eye(m)
        c = 1
        t = np.zeros(m)
        return R, c, t

    R = np.dot(np.dot(U, S), V.T)

    c = np.trace(np.dot(np.diag(D), S)) / sx
    t = my - c * np.dot(R, mx)

    return R, c, t
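A quick synthetic check, added here for illustration: build a known similarity transform, apply it to random points, and confirm that ralign recovers rotation, scale, and translation.

import numpy as np

rng = np.random.default_rng(1)

Q, _ = np.linalg.qr(rng.standard_normal((3, 3)))
if np.linalg.det(Q) < 0:
    Q[:, 0] = -Q[:, 0]                     # force a proper rotation
c_true, t_true = 1.7, np.array([0.5, -2.0, 3.0])

X = rng.standard_normal((3, 20))           # k x n source points
Y = c_true * Q @ X + t_true[:, None]       # exact targets

R_est, c_est, t_est = ralign(X, Y)
print(np.allclose(R_est, Q), np.isclose(c_est, c_true), np.allclose(t_est, t_true))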
Example #3
def get_transformation(X, Y):
    """

    Args:
        X: k x n source shape
        Y: k x n destination shape such that Y[:, i] is the correspondence of X[:, i]

    Returns: rotation R, scaling c, translation t such that ||Y - (cRX+t)||_2 is minimized.

    """
    """
    Copyright: Carlo Nicolini, 2013
    Code adapted from the Mark Paskin Matlab version
    from http://openslam.informatik.uni-freiburg.de/data/svn/tjtf/trunk/matlab/ralign.m 
    """

    m, n = X.shape

    mx = X.mean(1)
    my = Y.mean(1)
    Xc = X - np.tile(mx, (n, 1)).T
    Yc = Y - np.tile(my, (n, 1)).T

    sx = np.mean(np.sum(Xc * Xc, 0))
    sy = np.mean(np.sum(Yc * Yc, 0))

    M = np.dot(Yc, Xc.T) / n

    U, D, V = np.linalg.svd(M, full_matrices=True, compute_uv=True)
    V = V.T.copy()
    #print U,"\n\n",D,"\n\n",V
    r = np.linalg.matrix_rank(M)
    d = np.linalg.det(M)
    S = np.eye(m)
    if r > (m - 1):
        if np.linalg.det(M) < 0:
            S[m - 1, m - 1] = -1
    elif r == m - 1:
        if np.linalg.det(U) * np.linalg.det(V) < 0:
            S[m - 1, m - 1] = -1
    else:
        # Degenerate point configuration: the transform is not uniquely determined.
        R = np.eye(m)
        c = 1
        t = np.zeros(m)
        return R, c, t

    R = np.dot(np.dot(U, S), V.T)
    c = np.trace(np.dot(np.diag(D), S)) / sx
    t = my - c * np.dot(R, mx)

    return R, c, t
Example #4
def GMM(PCA, NCLASS=2):
    """
    PDF = Pr(G) (2pi)^(-k/2) |S|^(-1/2) exp[-1/2 (x-mu)' S^(-1) (x-mu)]
    logPDF = log Pr(G) - k/2 log(2pi) - 1/2 log(|S|) - 1/2 (x-mu)' S^(-1) (x-mu)
    Pr is monotonically related to log Pr(G) - log(|S|) - (x-mu)' S^(-1) (x-mu)
    """
    N = PCA.shape[1]
    initsize = N // NCLASS
    classes = np.zeros((N,))
    oldclasses = np.zeros((N,))
    Pr = np.zeros((N, NCLASS))
    partition = (2 * np.pi) ** (-0.5 * NCLASS)
    for i in range(NCLASS):
        classes[i * initsize : (i + 1) * initsize] = i
    for ii in range(2000):
        for i in range(NCLASS):
            c = PCA[:, classes == i]
            Mu = np.mean(c, 1)
            Cm = np.cov((c.T - Mu).T)
            k = np.shape(c)[1]
        Pm = np.linalg.pinv(Cm)
            center = PCA.T - Mu
        normalize = partition * k / (N + 1.0) / np.sqrt(np.linalg.det(Cm))
            Pr[:, i] = np.exp(-0.5 * np.array([np.dot(x, np.dot(Pm, x.T)) for x in center])) * normalize
        oldclasses[:] = classes
        classes = np.argmax(Pr, 1)
        if all(oldclasses == classes):
            break
    classification = (Pr.T / np.sum(Pr, 1)).T
    return classification, Pr
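A small synthetic run, added here and assuming the corrected snippet above: two well-separated 2-D clusters passed in as a (features x samples) array.

import numpy as np

rng = np.random.default_rng(2)
a = rng.normal(loc=(-3.0, 0.0), scale=0.5, size=(100, 2))
b = rng.normal(loc=(3.0, 0.0), scale=0.5, size=(100, 2))
data = np.vstack([a, b]).T                 # shape (2, 200): columns are samples

classification, Pr = GMM(data, NCLASS=2)
labels = np.argmax(Pr, 1)
print(labels[:100].mean(), labels[100:].mean())   # the two blobs should end up in different classes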
Example #5
def gauss_pdf(X, M, S):

    if M.shape[1] == 1:

        DX = X - np.tile(M, X.shape[1])
        E = 0.5 * np.sum(DX * (np.dot(np.linalg.inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(
            np.linalg.det(S))
        P = np.exp(-E)

    elif X.shape[1] == 1:

        DX = np.tile(X, M.shape[1]) - M
        E = 0.5 * np.sum(DX * (np.dot(np.linalg.inv(S), DX)), axis=0)
        E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(
            np.linalg.det(S))
        P = np.exp(-E)

    else:

        DX = X - M
        E = 0.5 * np.dot(DX.T, np.dot(np.linalg.inv(S), DX))
        E = E + 0.5 * M.shape[0] * np.log(2 * np.pi) + 0.5 * np.log(np.linalg.det(S))
        P = np.exp(-E)

    return (P[0], E[0])
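A sanity check, added here against scipy.stats for the column-mean branch (M given as a k x 1 column); the two printed densities should agree.

import numpy as np
from scipy.stats import multivariate_normal

X = np.array([[0.5], [1.0]])               # one 2-D sample as a column
M = np.zeros((2, 1))                       # mean as a column vector
S = np.array([[2.0, 0.3], [0.3, 1.0]])

P, E = gauss_pdf(X, M, S)
print(P, multivariate_normal(mean=M.ravel(), cov=S).pdf(X.ravel()))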
Example #6
def lfsrsolve(v, n):
    """Given a guess n for the length of the recurrence that generates
    the binary vector v, this function returns the coefficients of the
    recurrence."""
    v = v[:]

    vln = len(v)

    if (vln < 2 * n):
        raise ValueError('The vector v needs to be at least length 2n')

    M = np.array(circmat(v, n))
    Mdet = np.linalg.det(M)

    x = v[n:2 * n]

    Minv = np.linalg.inv(M)
    Minv = np.mod(np.round(Minv * Mdet), 2)
    # A note: Technically, the round() function should never show up, but
    # since Matlab does double precision arithmetic to calculate the inverse matrix
    # we need to bring the result back to integer values so we can perform a meaningful
    # mod operation. As long as this routine is not used on huge examples, it should
    # be ok

    y = np.mod(np.dot(Minv, x), 2)
    y = y[:].T  # Convert the output to a row vector
    return y
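The snippet above relies on a circmat helper that is not shown. The stand-in below is an assumption, mirroring the usual Hankel-style sliding-window layout M[i][j] = v[i + j] used for LFSR recurrences, followed by a tiny run on bits generated by x_{k+3} = x_k + x_{k+1} (mod 2).

import numpy as np

def circmat(v, n):
    # Assumed layout: n x n matrix of sliding windows, entry (i, j) = v[i + j].
    return [[v[i + j] for j in range(n)] for i in range(n)]

bits = [1, 0, 1, 1, 1, 0, 0]   # generated by x_{k+3} = x_k + x_{k+1} (mod 2)
print(lfsrsolve(bits, 3))      # expected recurrence coefficients: [1, 1, 0]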
def is_left(x0, x1, x2):
    """
    Return True if x0 is left of the line between x1 and x2. False otherwise.
    """
    assert x1.shape == x2.shape == (2, )
    matrix = np.array([x1 - x0, x2 - x0])
    if len(x0.shape) == 2:
        matrix = matrix.transpose((1, 2, 0))
    return np.linalg.det(matrix) > 0
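A concrete check of the orientation test, added for illustration: a positive determinant of [x1 - x0; x2 - x0] is the usual cross-product test for x0 lying to the left of the directed line from x1 to x2.

import numpy as np

x1 = np.array([0.0, 0.0])                       # line start
x2 = np.array([1.0, 0.0])                       # line end
print(is_left(np.array([0.0, 1.0]), x1, x2))    # True: above the x-axis is left of the +x direction
print(is_left(np.array([0.0, -1.0]), x1, x2))   # False: below the line
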
def depth(X, P):
    T = X[3]
    M = P[:, 0:3]
    p4 = P[:, 3]
    m3 = M[2, :]

    x = np.dot(P, X)
    w = x[2]
    X = X / w
    return (np.sign(np.linalg.det(M)) * w) / (T * np.linalg.norm(m3))
Example #9
File: umeyama.py Project: Abdob/maplab_ws
def umeyama(X, Y):
  assert X.shape[0] == 3
  assert Y.shape[0] == 3
  assert X.shape[1] > 0
  assert Y.shape[1] > 0

  m, n = X.shape

  mx = X.mean(1)
  my = Y.mean(1)

  Xc = X - np.tile(mx, (n, 1)).T
  Yc = Y - np.tile(my, (n, 1)).T

  sx = np.mean(np.sum(Xc * Xc, 0))
  sy = np.mean(np.sum(Yc * Yc, 0))
  Sxy = np.dot(Yc, Xc.T) / n

  U, D, V = np.linalg.svd(Sxy, full_matrices=True, compute_uv=True)
  V = V.T.copy()

  r = np.linalg.matrix_rank(Sxy)
  d = np.linalg.det(Sxy)
  S = np.eye(m)
  if r > (m - 1):
    if np.linalg.det(Sxy) < 0:
      S[m - 1, m - 1] = -1
  elif r == m - 1:
    if np.linalg.det(U) * np.linalg.det(V) < 0:
      S[m - 1, m - 1] = -1
  else:
    # Degenerate point configuration: the transform is not uniquely determined.
    R = np.eye(m)
    c = 1
    t = np.zeros(m)
    return R, c, t

  R = np.dot(np.dot(U, S), V.T)

  c = np.trace(np.dot(np.diag(D), S)) / sx
  t = my - c * np.dot(R, mx)

  return R, t, c
Example #10
def lfsrlength(v, n):
    """This function tests the vector v of bits to see if it is generated
    by a linear feedback recurrence of length at most n"""

    print('Order   Determinant')
    for j in range(1, n + 1):
        M = circmat(v, j)
        Mdet = np.mod(np.round(np.linalg.det(np.array(M))), 2)
        print(j, '        ', Mdet)
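A short run, added here and reusing the circmat stand-in sketched under Example #6: for bits generated by a length-3 recurrence, the determinant stays nonzero up to order 3 and vanishes (mod 2) once the guessed order exceeds the true length.

bits = [1, 0, 1, 1, 1, 0, 0, 1]   # x_{k+3} = x_k + x_{k+1} (mod 2), one step longer than before
lfsrlength(bits, 4)
# prints determinants 1.0 for orders 1-3 and 0.0 for order 4, flagging a length-3 recurrence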
Example #11
def mahalDist2(x, y, xCov, yCov, kFactor=3):
    BIG_L = TOM_SAPS.BIG_L

    #perform mahalanobis calculation of type 2
    xRows = x.shape[0]
    yRows = y.shape[0]
    mahalDistance = np.empty((xRows, yRows))
    normEucliDist = np.zeros_like(mahalDistance)
    # currently can't find a clever way to do this without nested 'for' loops
    for xR in range(xRows):
        for yR in range(yRows):
            cov1 = xCov[xR]
            cov2 = yCov[yR]
            # find the determinants of the covariance matrices:
            det1 = np.linalg.det(cov1)
            det2 = np.linalg.det(cov2)

            covInv = np.linalg.inv(cov1 + cov2)

            # check to see if one is larger than the other, if so, transform into that matrix
            if det1 > det2:
                eigVals, eigVecs = np.linalg.eig(cov1)
            else:
                eigVals, eigVecs = np.linalg.eig(cov2)

            sigma1 = np.dot(eigVecs, np.dot(cov1, eigVecs.transpose()))
            sigma2 = np.dot(eigVecs, np.dot(cov2, eigVecs.transpose()))

            sigmaTot = sigma1 + sigma2
            sigmaInv = np.linalg.inv(sigmaTot)
            nLogSig = np.log(np.linalg.det(sigmaTot))
            # gate
            kAdj = kFactor * (np.linalg.norm(np.diag(sigma1)) +
                              np.linalg.norm(np.diag(sigma2)))

            # calculate distance parameter
            dX = eigVecs * x[xR] + eigVecs * y[yR]
            normEucliDist[xR, yR] = np.linalg.norm(dX)

            bhattMahalDist = np.dot(dX.transpose(), np.dot(sigmaInv, dX))

            # not sure what these were used for previously but here is the python version of dd and ddx
            #bhattMahalDiff = x[xR] - y[yR]     # formerly ddx
            # formerly dd
            #bhattDistance = np.dot(bhattMahalDiff.transpose(), np.dot(covInv, bhattMahalDiff))

            if np.sqrt(bhattMahalDist) > kAdj:
                mahalDistance[xR, yR] = BIG_L
            else:
                mahalDistance[xR, yR] = bhattMahalDist + nLogSig
    # return mahalDistance and normEucliDist parameters
    return mahalDistance, normEucliDist
Example #12
def ralign(X, Y):
    m, n = X.shape

    mx = X.mean(1)
    my = Y.mean(1)

    Xc = X - np.tile(mx, (n, 1)).T
    Yc = Y - np.tile(my, (n, 1)).T

    sx = np.mean(np.sum(Xc * Xc, 0))
    sy = np.mean(np.sum(Yc * Yc, 0))

    Sxy = np.dot(Yc, Xc.T) / n

    U, D, V = np.linalg.svd(Sxy, full_matrices=True, compute_uv=True)
    V = V.T.copy()
    #print U,"\n\n",D,"\n\n",V
    r = np.linalg.matrix_rank(Sxy)
    d = np.linalg.det(Sxy)
    S = np.eye(m)
    if r > (m - 1):
        if np.linalg.det(Sxy) < 0:
            S[m - 1, m - 1] = -1
    elif r == m - 1:
        if np.linalg.det(U) * np.linalg.det(V) < 0:
            S[m - 1, m - 1] = -1
    else:
        # Degenerate point configuration: the transform is not uniquely determined.
        R = np.eye(m)
        c = 1
        t = np.zeros(m)
        return R, c, t

    R = np.dot(np.dot(U, S), V.T)

    c = np.trace(np.dot(np.diag(D), S)) / sx
    t = my - c * np.dot(R, mx)

    return R, c, t
Example #13
File: tcsolver.py Project: liangjj/BdG2.0
    def determinant(self, T):
        ''' Calculate the determinant of the matrix obtained by linearizing
        the SCF equation.
        '''

        phi = self.overlaps()
        emin = -self.parameters.material.debye_energy
        emax = self.parameters.material.debye_energy
        i = np.arange(0, self.parameters.nu)[None, :]

        F = partial(self.weight, T)
        dosi = partial(self.ham.dos, i)
        M = self.energy_integration(F, emin, emax, dosi)
        return np.linalg.det(M*phi)
Example #14
 def prob(self, x, y):
     '''
         returns the probability for a point to
         be in this cluster
     '''
     p = np.array([x, y])
     tmp = np.dot((p-self.mu).T, np.linalg.inv(self.sigma))
     tmp = np.dot(tmp, p-self.mu)
     left = math.exp(-0.5*tmp)
     tmp = 2*math.pi
     tmp = math.pow(tmp, 2)
     tmp = tmp*np.linalg.det(self.sigma)
     tmp = 1.0/math.sqrt(tmp)
     return tmp*left
Example #15
File: berry.py Project: woal777/pygra
def curvature(h, k=np.array([0., 0.]), dk=0.01, n=2):
    """Calculates the Berry curvature of a kpoint
  of the n lowest bands below E=0"""
    hkgen = h.get_hk_gen()  # function which generates hk

    def get_wf(hk, n2=n):
        """Get lowest bands"""
        e, wf = lg.eigsh(csc_matrix(hk), k=n2, which="LM", sigma=0.0)  # get wf
        wfsout = []
        ni = 0  # number of waves found
        wf = wf.transpose()  # transpose waves
        for ie, iwf in zip(e, wf):
            if ie < 0.0:
                wfsout.append(iwf)
                ni += 1
            if ni == n:
                return wfsout  # found the n lowest bands, return waves
        return get_wf(hk, n2=n2 + 2)  # if not enough waves, try with two more...

    # now calculate berry
    dx = np.array([dk / 2., 0.])
    dy = np.array([0., dk / 2.])
    k1 = k - dx - dy  # kpoints
    k2 = k + dx - dy
    k3 = k + dx + dy
    k4 = k - dx + dy
    wk1 = get_wf(hkgen(k1))  # waves at each plaquette corner (hkgen builds the Bloch Hamiltonian)
    wk2 = get_wf(hkgen(k2))
    wk3 = get_wf(hkgen(k3))
    wk4 = get_wf(hkgen(k4))

    def mij(wis, wjs):
        """Calculate the overlap matrix between two sets of waves"""
        nw = len(wis)
        m = np.matrix(np.zeros((nw, nw), dtype=complex))  # create matrix
        for i in range(nw):
            for j in range(nw):
                m[i, j] = np.dot(np.conjugate(wis[i]), wjs[j])  # overlap <w_i|w_j>
        return m

    phi = np.linalg.det(
        mij(wk1, wk2) * mij(wk2, wk3) * mij(wk3, wk4) *
        mij(wk4, wk1))  # determinant of the Wilson loop
    phi = np.arctan2(phi.imag, phi.real) / (dk * dk)  # phase
    return phi
Example #16
File: berry.py Project: joselado/pygra
def curvature(h,k=np.array([0.,0.]),dk=0.01,n=2):
  """Calculates the Berry curvature of a kpoint
  of the n lowest bands below E=0"""
  hkgen = h.get_hk_gen()  # function which generates hk
  def get_wf(hk,n2=n):
    """Get lowest bands"""
    e,wf = lg.eigsh(csc_matrix(hk),k=n2,which="LM",sigma=0.0) # get wf
    wfsout = []
    ni = 0 # number of waves found
    wf = wf.transpose() # transpose waves
    for ie,iwf in zip(e,wf):
      if ie<0.0:
        wfsout.append(iwf)
        ni += 1
      if ni==n:
        return wfsout # found the n lowest bands, return waves
    return get_wf(hk,n2=n2+2)  # if not enough waves, try with two more...
  # now calculate berry 
  dx = np.array([dk/2.,0.])
  dy = np.array([0.,dk/2.])
  k1 = k - dx - dy  # kpoints
  k2 = k + dx - dy
  k3 = k + dx + dy
  k4 = k - dx + dy
  wk1 = get_wf(hkgen(k1))  # waves at each plaquette corner (hkgen builds the Bloch Hamiltonian)
  wk2 = get_wf(hkgen(k2))
  wk3 = get_wf(hkgen(k3))
  wk4 = get_wf(hkgen(k4))
  def mij(wis,wjs):
    """Calculate the overlap matrix between two sets of waves"""
    nw = len(wis)
    m = np.matrix(np.zeros((nw,nw),dtype=complex)) # create matrix
    for i in range(nw):
      for j in range(nw):
        m[i,j] = np.dot(np.conjugate(wis[i]), wjs[j])  # overlap <w_i|w_j>
    return m
  phi = np.linalg.det(mij(wk1,wk2)*mij(wk2,wk3)*mij(wk3,wk4)*mij(wk4,wk1)) # determinant of the Wilson loop
  phi = np.arctan2(phi.imag,phi.real)/(dk*dk) # phase
  return phi
Example #17
def compute_P_from_essential(E):
    """    Computes the second camera matrix (assuming P1 = [I 0]) 
        from an essential matrix. Output is a list of four 
        possible camera matrices. """
    
    # make sure E is rank 2
    U,S,V = np.linalg.svd(E)
    if np.linalg.det(np.dot(U,V))<0:
        V = -V
    E = dot(U,dot(diag([1,1,0]),V))    
    
    # create matrices (Hartley p 258)
    Z = skew([0,0,-1])
    W = array([[0,-1,0],[1,0,0],[0,0,1]])
    
    # return all four solutions
    P2 = [vstack((dot(U,dot(W,V)).T,U[:,2])).T,
             vstack((dot(U,dot(W,V)).T,-U[:,2])).T,
            vstack((dot(U,dot(W.T,V)).T,U[:,2])).T,
            vstack((dot(U,dot(W.T,V)).T,-U[:,2])).T]

    return P2
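The function above leans on a skew helper (and, given the bare dot/diag/vstack calls, an implicit from numpy import *). A stand-in for the usual cross-product matrix, stated here as an assumption:

import numpy as np

def skew(a):
    # 3x3 skew-symmetric matrix with skew(a) @ b == np.cross(a, b)
    return np.array([[0, -a[2], a[1]],
                     [a[2], 0, -a[0]],
                     [-a[1], a[0], 0]])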
Example #18
def calculate_KL_div(new_mu, prev_mu, cur_traj_dist, prev_traj_dist):
    """ Calculate KL divergence for two multivariate Gaussian distributions. """

    T, du, dx = cur_traj_dist.dimensions

    # (1 x T) matrix, div for each time step
    kl_div = np.zeros((1, T))

    for t in range(T):

        new_mu_t = new_mu[t, :]
        prev_mu_t = prev_mu[t, :]

        prev_cov = prev_traj_dist.covar[t, :, :]
        new_cov = cur_traj_dist.covar[t, :, :]
        new_inv_cov = cur_traj_dist.inv_cov[t, :, :]

        print(prev_cov.shape)
        print(new_cov.shape)
        print(new_inv_cov.shape)

        kl_div_t = 0.5 * (np.trace(np.dot(new_inv_cov, prev_cov)) +
                          (new_mu_t - prev_mu_t).T.dot(new_inv_cov).dot(new_mu_t - prev_mu_t) -
                          T + np.log(np.linalg.det(new_cov)) - np.log(np.linalg.det(prev_cov)))

        kl_div[0, t] = max(0, kl_div_t)

    # sum total kl_div over all time steps
    return np.sum(kl_div)
    display[:, n_cols:] = T2

    for pi, pj in matches:
        # NOTE: these calls take matplotlib-style arguments, so plt.* is assumed here rather than cv2.*
        plt.plot([K1[pi][0], K2[pj][0] + n_cols], [K1[pi][1], K2[pj][1]],
                 marker='o',
                 linestyle='-',
                 color=rcolor())

    plt.imshow(display, cmap=plt.cm.gray)


show_matches(m)
xi = K1[m[:, 0], :]
xj = K2[m[:, 1], :]
F, status = cv2.findFundamentalMat(xi, xj, cv2.FM_RANSAC, 0.5, 0.9)
assert (np.linalg.det(F) < 1.e-7)
is_inlier = np.array(status == 1).reshape(-1)

inlier_i = xi[is_inlier]
inlier_j = xj[is_inlier]
hg = lambda x: np.array([x[0], x[1], 1])
K = np.array([[1520.4, 0., 302.32], [0, 1525.9, 246.87], [0, 0, 1]])
E = np.dot(K.T, np.dot(F, K))
U, s, VT = np.linalg.svd(E)

if np.linalg.det(np.dot(U, VT)) < 0:
    VT = -VT
E = np.dot(U, np.dot(np.diag([1, 1, 0]), VT))
V = VT.T

# Let's check Nister (2004) Theorem 3 constraint:
Example #20
File: elastic.py Project: IanHawke/plelrel
 def __init__(self, eos, prim, advected):
     # Store the eos
     self.eos = eos
     # These are the primitive quantities we store
     psi = numpy.zeros((3, 4))
     psi[:, 1:] = numpy.reshape(prim[0:9], (3, 3))
     v_up = prim[9:12]
     entropy = prim[12]
     # These are the advected quantities, which are the reference metric:
     k_X_down = numpy.reshape(advected[0:9], (3, 3))
     # The map has timelike components, which need computing:
     psi[:, 0] = numpy.dot(psi[:, 1:], v_up)
     # The Lorentz factor is simple in flat space
     W = 1 / numpy.sqrt(1 - numpy.dot(v_up, v_up))
     # The four velocity follows
     u_up = W * numpy.array([-1.0, v_up[0], v_up[1], v_up[2]])
     # The Minkowski metric gives us the projector
     g_M_up = numpy.diag([-1.0, 1.0, 1.0, 1.0])
     h_M_up = g_M_up + numpy.outer(u_up, u_up)
     g_M_down = g_M_up # Minkowski!
     h_M_down = g_M_down @ h_M_up @ g_M_down.T
     # Get the other velocities
     u_down = g_M_down @ u_up
     v_down = g_M_down[1:, 1:] @ v_up
     # Now project the matrix to the reference space
     g_X_up = psi @ g_M_up @ psi.T
     # Now take the advected metric and raise an index
     k_X_mixed = g_X_up @ k_X_down
     # The number density is the square root of the determinant of this
     n = numpy.sqrt(numpy.linalg.det(k_X_mixed))
     # Now we need the inverse of g on the reference space
     g_X_down = numpy.linalg.inv(g_X_up)
     # And then we need eta to compute invariants:
     eta_X_down = k_X_down / n**(2/3)
     eta_X_mixed = k_X_mixed / n**(2/3)
     # Now compute invariants
     I_1 = numpy.trace(eta_X_mixed)
     I_2 = numpy.trace(eta_X_mixed @ eta_X_mixed)
     # Now the shear scalar, toy EOS style
     shear_S = (I_1**3 - I_1 * I_2 - 18) / 24
     # Now compute EOS etc
     # This is the Toy_2 EOS, which is (I.4-10) in GHE.
     # The entropy that's stored in the primitive variable is used as K(s)
     enthalpy = eos.enthalpy(n, entropy, shear_S)
     p = eos.pressure(n, entropy, shear_S)
     epsilon = enthalpy - 1 - p / n
     # This uses the Toy_2 EOS
     f_1, f_2 = eos.fs(n, entropy, shear_S, I_1, I_2)
     # Now compute pi
     pi_X_down = 2 * n * (f_1 * (eta_X_down - g_X_down * I_1 / 3) +
                          2 * f_2 * (eta_X_down @ eta_X_mixed -
                                     g_X_down * I_2 / 3))
     pi_M_down = psi.T @ pi_X_down @ psi
     # Construct full pressure tensor
     p_M_down = p * h_M_down + pi_M_down
     # Now construct the von Mises scalars
     pi_X_mixed = g_X_up @ pi_X_down
     J_1 = numpy.trace(pi_X_mixed)
     J_2 = numpy.trace(pi_X_mixed @ pi_X_mixed)
     # Now construct the conserved variables
     # Have taken advantage of Minkowski space in many places here.
     S = n * enthalpy * W**2 * v_down + (g_M_up @ pi_M_down)[0, 1:]
     tau = n * (enthalpy * W**2 - W) - p + pi_M_down[0, 0]
     # Now construct the fluxes at this point
     # This is solely the flux in the x direction
     f_psi = numpy.zeros((3, 3))
     f_psi[:, 0] = psi @ v_up
     f_S = (n * enthalpy * W**2 * v_up[0] * v_down +
            (g_X_up @ p_M_down)[1, 1:])
     f_tau = (n * (enthalpy * W**2 - W) * v_up[0] +
              numpy.dot((g_M_up @ pi_M_down)[1, 1:], v_up))
     # Now store everything
     self.psi = psi
     self.v_up = v_up
     self.W = W
     self.n = n
     self.p = p
     self.epsilon = epsilon
     self.entropy = entropy
     self.h = enthalpy  # specific enthalpy computed above
     self.k_X_down = k_X_down
     self.k_X_mixed = k_X_mixed
     self.g_X_up = g_X_up
     self.g_X_down = g_X_down
     self.g_M_up = g_M_up
     self.g_M_down = g_M_down
     self.eta_X_down = eta_X_down
     self.eta_X_mixed = eta_X_mixed
     self.I_1 = I_1
     self.I_2 = I_2
     self.J_1 = J_1
     self.J_2 = J_2
     self.f_1 = f_1
     self.f_2 = f_2
     self.pi_X_down = pi_X_down
     self.pi_X_mixed = pi_X_mixed
     self.pi_M_down = pi_M_down
     self.p_M_down = p_M_down
     self.S = S
     self.tau = tau
     self.f_psi = f_psi
     self.f_S = f_S
     self.f_tau = f_tau
Example #21
def logmvnpdf(x, mu, K, logdetK=None, opt1='standard'):
    """Calculate the log multivariate normal probability density at x.
    
    logmvnpdf calculates the natural logarithm of the probability density of 
    the samples contained in x.
    
    Args:
        x: Samples to calculate probability for. x can be given as a single 
        vector (1-D array), or as a matrix ((n x d) array). In the latter case
        if mu is a matrix ((n x d) array), and K is a (n x d x d) array, the 
        probability of the i-th row of x will be calculated under the multi-
        variate normal with its mean given by the i-th row of mu, and its 
        covariance given by the i-th plane of K.
        
        mu: Mean of distribution.
        
        K: Covariance matrix of multivariate normal distribution.
        
        logdetK: Natural log of determinant(s) of K. Float (if only one K) or 
        (n x 1) float array (if several K)
        
        opt1: Method of interpreting K. If set to 'standard', K will be 
        interpreted as the standard covariance matrix. If set to 'inverse', K 
        will be interpreted as the inverse covariance matrix.
    
    Returns:
        logprob: the logarithm of the probability density of the samples under
        the given distribution(s). Length (n) float array.
        
    Example call:
        >>> # Calculate probability of one sample under one distribution
        >>> x = np.array([1.,2.,3.,5.])
        >>> mu = np.array([0.])
        >>> K = np.array([3.])
        >>> logmvnpdf(x,mu,K)
        -3.3871832107433999
        
        >>> # Calculate probability of one sample under one distribution
        >>> x = np.array([1.,2.])
        >>> mu = np.array([0.,0.])
        >>> K = np.array([[2.,1.],[1.,2.]])
        >>> logmvnpdf(x,mu,K)
        -3.3871832107433999
        
        >>> # Calculate probability of three samples under one distribution
        >>> x = np.array([[1.,2.],[0,0],[-1,-2]])
        >>> mu = np.array([0.,0.])
        >>> K = np.array([[2,1],[1,2]])
        >>> logmvnpdf(x,mu,K)
        array([-3.38718321, -2.38718321, -3.38718321])
        
        >>> # Calculate probability of three samples with three different means
        >>> # and one covariance matrix
        >>> x = np.array([[1.,2.],[0,0],[-1,-2]])
        >>> mu = np.array([[0.,0.],[1,1],[2,2]])
        >>> x -= mu
        >>> mu = np.array([0.,0])
        >>> K = np.array([[2,1],[1,2]])
        >>> logmvnpdf(x,mu,K)
        array([-3.38718321, -2.72051654, -6.72051654])
    """

    # If K is one-dimensional, just calculate normpdf of all samples
    if K.size == 1:
        if not (isinstance(mu, int) or isinstance(mu, float)):
            mu = mu.item(0)
        if not (isinstance(K, int) or isinstance(K, float)):
            K = K.item(0)
        return -.5 * np.log(2 * np.pi * K) - .5 * ((x - mu)**2) / K

    # Remove extraneous dimension from x and mu
    x = np.squeeze(x)
    mu = np.squeeze(mu)
    z = (x - mu).T
    # Make sure there are as many samples as covariance matrices and figure out
    # how many total calculations we'll need to do

    # Calculate inverses and log-determinants if necessary
    if not opt1.lower() == 'inverse':
        # Have multiple covariance matrices been supplied?
        if len(K.shape) == 3:
            # Calculate inverses
            Kinv = np.linalg.inv(K)
            # Calculate log determinants
            if logdetK is None:
                logdetK = np.log(np.linalg.det(K))
        else:
            # Calculate inverse
            Kinv = np.linalg.inv(K)
            # Calculate log determinant
            if logdetK is None:
                logdetK = np.log(np.linalg.det(K))
    else:
        Kinv = K.copy()
        # Have log-determinants been provided?
        if logdetK is None:
            # Multiple covariance matrices?
            K = np.linalg.inv(K)
            detK = np.linalg.det(K)
            logdetK = np.log(detK)

    # Calculate matrix product of z*Kinv*z.T for each Kinv and store it in y.
    temp1 = np.dot(Kinv, z)
    if len(z.shape) == 2:
        mat_prod = (z * temp1).sum(0)
    else:
        mat_prod = np.dot(z, temp1)

    # Get dimension of system
    dim = z.shape[0]

    # Calculate final log probability
    logprob = -.5 * (dim * np.log(2 * np.pi) + logdetK + mat_prod)

    # Remove extraneous dimension
    return np.squeeze(logprob)
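A quick cross-check of the doctest values against scipy.stats, added for illustration:

import numpy as np
from scipy.stats import multivariate_normal

x = np.array([1., 2.])
mu = np.array([0., 0.])
K = np.array([[2., 1.], [1., 2.]])

print(logmvnpdf(x, mu, K))                            # -3.3871832...
print(multivariate_normal(mean=mu, cov=K).logpdf(x))  # should agree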
Example #22
 def max_prob(self):
     leafs = self.leaf_nodes()
     probs = []
     for l in leafs:
         probs.append(np.linalg.det(cov) * math.sqrt(2 * math.pi))  # `cov` is presumably the leaf's covariance matrix
     return max(probs)
x=np.arange(0,100,1)
runfile('C:/Users/Reizkian Yesaya/.spyder-py3/E X P E R I M E N T.py', wdir='C:/Users/Reizkian Yesaya/.spyder-py3')

## ---(Sat Nov 17 07:02:39 2018)---
runfile('C:/Users/Reizkian Yesaya/.spyder-py3/E X P E R I M E N T.py', wdir='C:/Users/Reizkian Yesaya/.spyder-py3')
Me
np.e-31
np.e
import numpy as np
a=np.array([1,2],[3,4])
a=np.array([1,2];[3,4])
a=np.array([[1,2],[3,4]])
a
a.T
np.det(a)
a.max
a
a.t
inv(a)
np.inv(a)
np.invert(a)
a*np.invert(a)
a*a
np.dot(a,a)
a
np.dot(a,np.invert(a))
np.dot(a,np.linalg.inv(a))
import numpy as np

A=np.array([[2,4],[1,-1]])
Example #24
    def test_lu(self):

        import math

        A = np.array([[3, 17, 10], [2, 4, -2], [6, 18, -12]])
        LU = np.array([[6, 18, -12], [1 / 2, 8, 16], [1 / 3, -1 / 4, 6]])
        P = np.array([2, 0, 1], dtype=np.uint16)

        # lu decomposition
        lu, p = np.lu(A)
        for i, ei in enumerate(lu):
            for j, eij in enumerate(ei):
                self.assertEqual(eij, LU[i][j])

        for i, ei in enumerate(p):
            self.assertEqual(ei, P[i])

        # determinant
        self.assertEqual(np.det(A), 6 * 8 * 6)

        # zero determinant, singular
        AA = np.array([[3, 17, 10], [2, 4, -2], [3, 17, 10]])
        self.assertEqual(np.det(AA), 0)

        # vector solve
        b = np.array([1, -1, 0])
        x = np.solve(A, b)
        X = [-1.375, 0.375, -0.125]
        for i, ei in enumerate(x):
            self.assertEqual(ei, X[i])

        res = np.dot(A, x) - b
        for i, ei in enumerate(res):
            self.assertEqual(ei, 0)

        # vector solve. singular
        with self.assertRaises(ValueError):
            x = np.solve(AA, b)

        # matrix solve
        b = np.array([[1, 0], [-1, 1], [0, 0]])
        x = np.solve(A, b)
        X = [[-1.375, 4 / 3], [0.375, -1 / 3], [-0.125, 1 / 6]]
        for i, ei in enumerate(x):
            for j, eij in enumerate(ei):
                self.assertEqual(eij, X[i][j])

        res = np.dot(A, x) - b
        for i, ei in enumerate(res):
            for j, eij in enumerate(ei):
                self.assertTrue(math.fabs(eij) < 1e-6)

        # matrix inverse
        Ai = np.inv(A)
        I = np.eye(3)
        res = np.dot(A, Ai) - I
        for i, ei in enumerate(res):
            for j, eij in enumerate(ei):
                self.assertTrue(math.fabs(eij) < 1e-6)

        res = np.dot(Ai, A) - I
        for i, ei in enumerate(res):
            for j, eij in enumerate(ei):
                self.assertTrue(math.fabs(eij) < 1e-6)

        Ais = np.solve(A, I)
        res = Ai - Ais
        for i, ei in enumerate(res):
            for j, eij in enumerate(ei):
                self.assertTrue(math.fabs(eij) < 1e-6)

        # inverse. singular
        with self.assertRaises(ValueError):
            Ai = np.inv(AA)
Example #25
File: lqr.py Project: Etragas/GPSDrone
def calculate_KL_div(new_mu, prev_mu, cur_traj_dist, prev_traj_dist):
    """ Calculate KL divergence for two multivariate Gaussian distributions. """

    T, du, dx = cur_traj_dist.dimensions

    # (1 x T) matrix, div for each time step
    kl_div = np.zeros((1, T))

    for t in range(T):

        new_mu_t = new_mu[t,:]
        prev_mu_t = prev_mu[t,:]

        prev_cov = prev_traj_dist.cov[t,:,:]
        new_cov = cur_traj_dist.cov[t,:,:]
        new_inv_cov = cur_traj_dist.inv_cov[t,:,:]

        kl_div_t = 0.5 * (np.trace(np.dot(new_inv_cov, prev_cov)) +
                          (new_mu_t - prev_mu_t).T.dot(new_inv_cov).dot(new_mu_t - prev_mu_t) -
                          T + np.log(np.linalg.det(new_cov)) - np.log(np.linalg.det(prev_cov)))

        kl_div[0, t] = max(0, kl_div_t)

    # sum total kl_div over all time steps
    return np.sum(kl_div)
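For reference, this is the textbook closed-form KL divergence between two multivariate Gaussians that each per-timestep term above mirrors; note that the closed form subtracts the dimensionality k, whereas the snippets above subtract the horizon T. Added here as a standalone sketch.

import numpy as np

def gaussian_kl(mu0, cov0, mu1, cov1):
    # KL( N(mu0, cov0) || N(mu1, cov1) ) for k-dimensional Gaussians
    k = mu0.shape[0]
    inv1 = np.linalg.inv(cov1)
    diff = mu1 - mu0
    return 0.5 * (np.trace(inv1 @ cov0)
                  + diff @ inv1 @ diff
                  - k
                  + np.log(np.linalg.det(cov1) / np.linalg.det(cov0)))

mu = np.zeros(3)
cov = np.diag([1.0, 2.0, 0.5])
print(gaussian_kl(mu, cov, mu, cov))   # identical distributions -> 0.0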
Example #26
def FastSLAM_1_known_correspondences_step(z_t, c_t, u_t, Y_t_1, observed_features):

	# initialise Q_t
	variance_err = .1
	Q_t = variance_err*np.eye(3)

	weights = np.ones((len(Y_t_1), 6))

	# Loop over all particles in particleswarm
	for iterator in range(0,Y_t_1.shape[0]):
		particle = Y_t_1[iterator]
		# Retrieve location and bearing of the particle
		x_t_1 = particle[0]



		# Sample the new location and bearing with the help of the motion model and the previous location.
		x_t = sample(x_t_1, u_t)

		# For every observed feature.
		for j in c_t:
			if not(j in observed_features):
				observed_features.append(j)

				# Calculate mean
				mean = 

				# Calculate H
				H = 

				# Calculation the covariance
				covariance = np.multiply(np.multiply(np.linalg.inv(H),Q_t),np.transpose(np.linalg.inv(H)))

				# Insert new Landmark
				new_landmark = [mean, covariance]
				particle[j] = new_landmark

				#Update Weights
				weights[iterator] = np.ones(6)
			else:
				# Extract Landmark location
				landmark = particle[j]

				# Measurement prediction
				z_measurement = 

				# Calculate Jacobian
				H = 

				# Measurement Covariance
				Q = np.multiply(np.multiply(H,landmark[1]),np.transpose(H)) + Q_t

				# Calculate Kalman gain
				K = np.multiply(np.multiply(landmark[1],np.transpose(H)),Q)
				
				# update mean
				mean = landmark[0] + np.multiply(K,(z_t-z_measurement))

				# Update Covariance
				covariance = np.multiply((np.eye(np.shape(K)[0]) - np.multiply(K,H)),landmark[1]) 

				# update Weights
				weights[iterator] = np.power(np.linalg.det(2*math.pi*Q),-0.5)*np.exp(-0.5*(np.transpose(z_t - z_measurement))*np.linalg.inv(Q)*(z_t - z_measurement))


	Y_t = np.zeros(Y_t_1.shape)

	for iterator in range(0,Y_t_1.shape[0]):

		# Sample new Y_t
		Y_t[iterator] = sample_Y(Y_t_1[iterator])









	return Y_t
Example #27
def evans(yl,yr,lamda,s,p,m,e):

    if e['evans'] == "reg_reg_polar":
        muL = np.trace(np.dot(np.dot(np.conj((linalg.orth(yl)).T),
                            e['LA'](e['Li'][0],lamda,s,p)),linalg.orth(yl)))
        muR = np.trace(np.dot(np.dot(np.conj((linalg.orth(yr)).T),
                            e['RA'](e['Ri'][0],lamda,s,p)),linalg.orth(yr)))

        omegal,gammal = manifold_polar(e['Li'],linalg.orth(yl),lamda,e['LA'],
                                        s,p,m,e['kl'],muL)
        omegar,gammar = manifold_polar(e['Ri'],linalg.orth(yr),lamda,e['RA'],
                                        s,p,m,e['kr'],muR)

        out = (linalg.det(np.dot(np.conj(linalg.orth(yl).T),yl))*
               linalg.det(np.dot(np.conj(linalg.orth(yr).T),yr))*gammal*
               gammar*linalg.det(np.concatenate((omegal,omegar),axis=1)))

    elif e['evans'] == "adj_reg_polar":
        muL = np.trace(np.dot(np.dot(np.conj((linalg.orth(yl)).T),
                            e['LA'](e['Li'][0],lamda,s,p)),linalg.orth(yl)))
        muR = np.trace(np.dot(np.dot(np.conj((linalg.orth(yr)).T),
                            e['RA'](e['Ri'][0],lamda,s,p)),linalg.orth(yr)))

        omegal,gammal = manifold_polar(e['Li'],linalg.orth(yl),lamda,e['LA'],
                                        s,p,m,e['kl'],muL)
        omegar,gammar = manifold_polar(e['Ri'],linalg.orth(yr),lamda,e['RA'],
                                        s,p,m,e['kr'],muR)

        out = (np.conj(linalg.det(np.dot(np.conj(linalg.orth(yl).T),yl)))*
                linalg.det(np.dot(np.conj(linalg.orth(yr).T),yr))*
                np.conj(gammal)*gammar*linalg.det(
                np.conj(omegal.T).dot(omegar)))

    elif e['evans'] == "reg_adj_polar":
        muL = np.trace(np.dot(np.dot(np.conj((linalg.orth(yl)).T),
                        e['LA'](e['Li'][0],lamda,s,p)),linalg.orth(yl)))
        muR = np.trace(np.dot(np.dot(np.conj((linalg.orth(yr)).T),
                        e['RA'](e['Ri'][0],lamda,s,p)),linalg.orth(yr)))

        omegal,gammal = manifold_polar(e['Li'],linalg.orth(yl),lamda,e['LA'],
                                        s,p,m,e['kl'],muL)
        omegar,gammar = manifold_polar(e['Ri'],linalg.orth(yr),lamda,e['RA'],
                                        s,p,m,e['kr'],muR)

        out = ( linalg.det(np.dot(np.conj(linalg.orth(yl).T),yl))*
                    np.conj(linalg.det(np.dot(np.conj(linalg.orth(yr).T),yr)))*
                    np.conj(gammar)*gammal*linalg.det(
                    np.conj(omegar.T).dot(omegal)))


    elif e['evans'] == "adj_reg_compound":
        Lmani = manifold_compound(e['Li'],wedgieproduct(yl),lamda,s,p,m,
                                  e['LA'],e['kl'],1)
        Rmani = manifold_compound(e['Ri'],wedgieproduct(yr),lamda,s,p,m,
                                  e['RA'],e['kr'],-1)

        out = np.inner(np.conj(Lmani),Rmani)

    elif e['evans'] == "reg_adj_compound":
        Lmani = manifold_compound(e['Li'],wedgieproduct(yl),lamda,s,p,m,
                                  e['LA'],e['kl'],1)
        Rmani = manifold_compound(e['Ri'],wedgieproduct(yr),lamda,s,p,m,
                                  e['RA'],e['kr'],-1)

        out = np.inner(np.conj(Rmani),Lmani)

    elif e['evans'] == "reg_reg_bvp_cheb":
        VLa,VLb = bvp_basis_cheb(s,p,m,lamda,e['A_pm'],e['LA'],-1,e['kl'])
        VRa,VRb = bvp_basis_cheb(s,p,m,lamda,e['A_pm'],e['RA'],1,e['kr'])
        temp = linalg.null_space(yl.T)
        detCL = ( np.linalg.det(np.hstack([yl,temp]))
                 / np.linalg.det(np.hstack([VLa,temp])) )
        temp = linalg.null_space(yr.T)
        detCR = ( np.linalg.det(np.hstack([yr,temp]))
                 / np.linalg.det(np.hstack([VRb,temp])) )

        out = np.linalg.det(np.hstack([VLb,VRa]))*detCL*detCR

    elif e['evans'] == "regular_periodic":
        sigh = manifold_periodic(e['Li'],np.eye(e['kl']),lamda,s,p,m,e['kl'])
        out = np.zeros((1,len(kappa)),dtype=complex)
        for j in range(len(kappa)):
            out[j] = np.linalg.det(sigh-np.exp(1j*kappa[j]*p['X'])
                        *np.exp(e['kl']))

    elif e['evans'] == "balanced_periodic":
        sigh = manifold_periodic(e['Li'],np.eye(e['kl']),lamda,s,p,m,e['kl'])
        phi = manifold_periodic(e['Ri'],np.eye(e['kr']),lamda,s,p,m,e['kr'])
        out = np.zeros((1,len(kappa)),dtype=complex)
        for j in range(len(kappa)):
            out[j] = np.linalg.det(sigh-np.exp(1j*kappa[j]*p['X'])*phi)

    elif e['evans'] == "balanced_polar_scaled_periodic":
        kappa = yr
        Amatrix = e['A'](e['Li'][0],lamda,s,p)
        k, kdud = np.shape(Amatrix)
        egs = np.linalg.eigvals(Amatrix)
        real_part_egs = np.real(egs)
        cnt_pos = len(np.where(real_part_egs > 0)[0])
        cnt_neg = len(np.where(real_part_egs < 0)[0])
        if not (cnt_neg == e['dim_eig_R']):
            raise ValueError("consistent splitting failed")
        if not (cnt_pos == e['dim_eig_L']):
            raise ValueError("consistent splitting failed")
        index1 = np.argsort(-real_part_egs)
        muL = np.sum(egs[index1[0:e['dim_eig_L']]])
        index2 = np.argsort(real_part_egs)
        muR = np.sum(egs[index2[0:e['dim_eig_R']]])
        # Initializing vector
        ynot = linalg.orth(np.vstack([np.eye(k),np.eye(k)]))
        # Compute the manifolds
        sigh, gammal = manifold_polar(e['Li'],ynot,lamda,A_lift_matrix,s,p,m,k,muL)
        phi, gammar = manifold_polar(e['Ri'],ynot,lamda,A_lift_matrix,s,p,m,k,muR)
        #print(sigh, '\n', gammal, '\n', phi, '\n', gammar)
        #STOP
        out = np.zeros((1,len(kappa)),dtype=complex)
        for j in range(1,len(kappa)+1):
            out[:,j-1] = gammal*gammar*np.linalg.det(np.vstack([np.concatenate(
                [sigh[:k,:k],np.exp(1j*kappa[j-1]*p['X'])*phi[:k,:k]],axis=1),
                np.concatenate([sigh[k:2*k,:k], phi[k:2*k,:k]],axis=1)]))

    elif e['evans'] == "bpspm":
        out = Struct()
        out.lamda = lamda
        muL = 0
        muR = 0
        # initializing vector
        ynot = linalg.orth(np.vstack([np.eye(e['kl']),np.eye(e['kr'])]))
        # compute the manifolds
        out.sigh,out.gammal = manifold_polar(e['Li'],ynot,lamda,A_lift_matrix,s,p,
                                             m,e['kl'],muL)
        out.phi,out.gammar = manifold_polar(e['Ri'],ynot,lamda,A_lift_matrix,s,p,
                                             m,e['kr'],muR)

    elif e['evans'] == "balanced_polar_periodic":
        kappa = yr
        muL = 0
        muR = 0
        k = e['kl']

        # initializing vector
        ynot = linalg.orth(np.vstack([np.eye(k), np.eye(k)]))

        # compute the manifolds
        sigh, gammal = manifold_polar(e['Li'],ynot,lamda,A_lift_matrix,s,p,m,
                                      k,muL)
        phi, gammar = manifold_polar(e['Ri'],ynot,lamda,A_lift_matrix,s,p,m,
                                     k,muR)
        out = np.zeros(len(kappa),dtype=complex)
        for j in range(len(kappa)):
            out[j] = gammal*gammar*np.linalg.det(np.vstack([np.concatenate(
                    [sigh[:k,:k],np.exp(1j*kappa[j]*p.X)*phi[:k,:k]],axis=1),
                    np.concatenate([sigh[k:2*k,:k], phi[k:2*k,:k]],axis=1)]))

    else:
        raise ValueError("e['evans'], '"+e['evans']+"', is not implemented.")

    return out