Example #1
 def relentropy(self,idealrho=None):
   # note: logm fails for pure states (density matrices with zero eigenvalues)
   self.check_idealrho(idealrho)
   a = np.trace(np.dot(self.idealrho,splnlg.logm(self.idealrho)))
   b = np.trace(np.dot(self.idealrho,splnlg.logm(self.exprho)))
   self.relativeentropy=np.real(a-b)
   return self.relativeentropy
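For reference, a minimal standalone sketch of the same quantity, S(rho || sigma) = Tr(rho log rho) - Tr(rho log sigma), assuming full-rank (mixed) density matrices so that logm is well defined:

import numpy as np
import scipy.linalg as splnlg

def relative_entropy(rho, sigma):
    # S(rho || sigma) = Tr(rho log rho) - Tr(rho log sigma)
    a = np.trace(rho @ splnlg.logm(rho))
    b = np.trace(rho @ splnlg.logm(sigma))
    return np.real(a - b)

rho = np.array([[0.7, 0.1], [0.1, 0.3]])   # full-rank density matrix
sigma = np.eye(2) / 2                      # maximally mixed state
print(relative_entropy(rho, sigma))        # >= 0 by Klein's inequality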
    def test_logm_type_preservation_and_conversion(self):
        # The logm matrix function should preserve the type of a matrix
        # whose eigenvalues are positive with zero imaginary part.
        # Test this preservation for variously structured matrices.
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, 1]],
                [[1, 0], [1, 1]],
                [[2, 1], [1, 1]],
                [[2, 3], [1, 2]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(not any(w.imag or w.real < 0 for w in W))

            # check float type preservation
            A = np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char not in complex_dtype_chars)

            # check complex type preservation
            A = np.array(matrix_as_list, dtype=complex)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)

            # check float->complex type conversion for the matrix negation
            A = -np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
 def test_nils(self):
     a = array([[-2., 25., 0., 0., 0., 0., 0.],
                [0., -3., 10., 3., 3., 3., 0.],
                [0., 0., 2., 15., 3., 3., 0.],
                [0., 0., 0., 0., 15., 3., 0.],
                [0., 0., 0., 0., 3., 10., 0.],
                [0., 0., 0., 0., 0., -2., 25.],
                [0., 0., 0., 0., 0., 0., -3.]])
     m = (identity(7)*3.1+0j)-a
     logm(m, disp=False)
Example #4
 def test_nils(self):
     a = array(
         [
             [-2.0, 25.0, 0.0, 0.0, 0.0, 0.0, 0.0],
             [0.0, -3.0, 10.0, 3.0, 3.0, 3.0, 0.0],
             [0.0, 0.0, 2.0, 15.0, 3.0, 3.0, 0.0],
             [0.0, 0.0, 0.0, 0.0, 15.0, 3.0, 0.0],
             [0.0, 0.0, 0.0, 0.0, 3.0, 10.0, 0.0],
             [0.0, 0.0, 0.0, 0.0, 0.0, -2.0, 25.0],
             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -3.0],
         ]
     )
     m = (identity(7) * 3.1 + 0j) - a
     logm(m, disp=False)
Example #5
 def test_opposite_sign_complex_eigenvalues(self):
     # See gh-6113
     E = [[0, 1], [-1, 0]]
     L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
     assert_allclose(expm(L), E, atol=1e-14)
     assert_allclose(logm(E), L, atol=1e-14)
     E = [[1j, 4], [0, -1j]]
     L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
     assert_allclose(expm(L), E, atol=1e-14)
     assert_allclose(logm(E), L, atol=1e-14)
     E = [[1j, 0], [0, -1j]]
     L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
     assert_allclose(expm(L), E, atol=1e-14)
     assert_allclose(logm(E), L, atol=1e-14)
Example #6
def karcher_mean(x, tol=0.01):
	'''
	Determine the Karcher mean of a set of rotations.
	Implementation of Algorithm 1 in Rotation Averaging, Hartley et al., IJCV 2013.
	'''
	R = x[0]
	N = x.shape[0]
	normDeltaR = np.inf
	itr = 0
	while True:
		#Estimate the delta rotation between the current center and all points
		deltaR  = np.zeros((3,3))
		oldNorm = normDeltaR
		for i in range(N):
			deltaR += linalg.logm(np.dot(np.transpose(R),x[i]))
		deltaR     = deltaR / N
		normDeltaR = linalg.norm(deltaR, ord='fro')/np.sqrt(2)

		if oldNorm - normDeltaR < tol:
			break
	
		R = np.dot(R, linalg.expm(deltaR)) 
		#print itr
		itr += 1		
	
	return R
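A quick usage sketch for the function above, assuming `linalg` refers to `scipy.linalg`; for rotations about a common axis the Karcher mean is the rotation by the mean angle:

import numpy as np
from scipy import linalg

def rotx(t):
    # rotation by angle t about the x-axis
    return np.array([[1.0, 0.0, 0.0],
                     [0.0, np.cos(t), -np.sin(t)],
                     [0.0, np.sin(t), np.cos(t)]])

x = np.stack([rotx(0.1), rotx(0.2), rotx(0.3)])
R_mean = karcher_mean(x, tol=1e-6)
print(np.allclose(R_mean, rotx(0.2), atol=1e-6))   # mean angle is 0.2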
Example #7
def vn_entropy_b(psi_t, label, nos, nol_b, nop):
    """
    Calculates Von-Neumann entropy as S = - tr(rho * ln(rho)).
    Also calculates trace of square of density matrix (measure of
    entanglement).
    Uses a filter to suppress 'WARNING: The logm input matrix may be nearly
    singular'. Wraps loop in tqdm for progress bar.

    :param psi_t: Psi(t)
    :param label: Relabelled states
    :param nos: No. of states
    :param nol_b: No. of lattice sites in B
    :param nop: No. of particles
    :return: Real Von-Neumann entropy
    :return: Trace of density matrix of B
    """
    vn_entropy = np.zeros(len(psi_t), dtype=complex)
    tr_sqr = np.zeros(len(psi_t), dtype=float)

    warnings.filterwarnings("ignore")

    for idx, vec in enumerate(psi_t):
        d_matrix = rho_b_pbasis(label, vec, nos, nol_b, nop)
        vn_entropy[idx] = -np.trace(np.dot(d_matrix, la.logm(d_matrix)))
        tr_sqr[idx] = trace_squared(d_matrix)

    return vn_entropy.real, tr_sqr
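The core step is S = -tr(rho ln rho); a minimal standalone sketch without the basis-specific helpers (rho_b_pbasis, trace_squared) could read:

import numpy as np
from scipy import linalg as la

def von_neumann_entropy(rho):
    # S = -tr(rho ln rho), assuming a full-rank density matrix so logm is defined
    return float(np.real(-np.trace(np.dot(rho, la.logm(rho)))))

rho = np.diag([0.5, 0.5])          # maximally mixed qubit
print(von_neumann_entropy(rho))    # ln 2 ~ 0.693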
Example #8
def test_expm():
    """Testing expm function"""
    m = np.random.rand(15, 15)
    m = m + m.T
    m_exp = my_mfd.expm(m)
    assert_array_almost_equal(m_exp.T, m_exp)
    assert_array_almost_equal(linalg.logm(m_exp), m)
Example #9
    def __init__(self):
        app.Canvas.__init__(self, position=(50, 50), keys='interactive')

        self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)

        self.program['a_position'] = gloo.VertexBuffer(v_position)
        self.program['a_color'] = gloo.VertexBuffer(v_color)
        self.program['a_size'] = gloo.VertexBuffer(v_size)

        self.program['u_pan'] = (0., 0.)
        self.program['u_scale'] = (1., 1.)

        self.program['u_vec1'] = (1., 0., 0., 0.)
        self.program['u_vec2'] = (0., 1., 0., 0.)

        # Circulant matrix.
        circ = np.diagflat(np.ones(ndim-1), 1)
        circ[-1, 0] = -1 if ndim % 2 == 0 else 1
        self.logcirc = logm(circ)
        # We will solve the equation dX/dt = log(circ) * X in real time
        # to compute the matrix exponential expm(t*log(circ)).
        self.mat = np.eye(ndim)
        self.dt = .001
        gloo.set_state(clear_color=(1, 1, 1, 1), blend=True,
                       blend_func=('src_alpha', 'one_minus_src_alpha'))
        self._timer = app.Timer('auto', connect=self.on_timer, start=True)
Example #10
def d2c(Ad, Bd, C, D, dt):
    """returns A, B, C, D
    Converts a set of digital state space system matrices to their continuous counterpart.
    """
    A = la.logm(Ad)/dt
    B = la.solve((Ad - sp.eye(A.shape[0])), A) @ Bd
    return A, B, C, D
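A round-trip sketch, assuming `la` is `scipy.linalg` and that `sp.eye` is available inside d2c (older SciPy re-exported `numpy.eye`; with a recent SciPy, aliasing `sp = numpy` behaves the same way). Discretizing a hypothetical continuous system and converting back should recover A and B:

import numpy as np
from scipy import linalg as la

A_true = np.array([[0.0, 1.0], [-2.0, -0.3]])
B_true = np.array([[0.0], [1.0]])
C, D, dt = np.eye(2), np.zeros((2, 1)), 0.01
Ad = la.expm(A_true * dt)                                 # zero-order-hold A
Bd = np.linalg.solve(A_true, (Ad - np.eye(2)) @ B_true)   # zero-order-hold B
A, B, _, _ = d2c(Ad, Bd, C, D, dt)
print(np.allclose(A, A_true), np.allclose(B, B_true))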
Example #11
 def test_al_mohy_higham_2012_experiment_1_logm(self):
     # The logm completes the round trip successfully.
     # Note that the expm leg of the round trip is badly conditioned.
     A = _get_al_mohy_higham_2012_experiment_1()
     A_logm, info = logm(A, disp=False)
     A_round_trip = expm(A_logm)
     assert_allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)
Example #12
def compute_Rt(P):
    """Gen a P matrix, calculate the R*t matrix."""
    try:
        Rt = matrix(sl.logm(P, disp = False)[0])
    except Exception as E:
        raise MatCalcExcep(str(E))

    return makeReal(Rt)
Example #13
    def test_logm_type_conversion_mixed_sign_or_complex_spectrum(self):
        complex_dtype_chars = ("F", "D", "G")
        for matrix_as_list in ([[1, 0], [0, -1]], [[0, 1], [1, 0]], [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))

            # check complex->complex
            A = np.array(matrix_as_list, dtype=complex)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)

            # check float->complex
            A = np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
Example #14
 def test_round_trip_random_complex(self):
     np.random.seed(1234)
     for n in range(1, 6):
         M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
         for scale in np.logspace(-4, 4, 9):
             M = M_unscaled * scale
             M_logm, info = logm(M, disp=False)
             M_round_trip = expm(M_logm)
             assert_allclose(M_round_trip, M)
Example #15
 def log(parameter):
     n_matrices = len(parameter) / 6
     matrices = numpy.empty((n_matrices, 4, 4))
     for i in xrange(n_matrices):
         p = parameter[i * 6: (i + 1) * 6].copy()
         matrices[i, :, :] = logm(
             rigid_from_parameters(p)
         )
     return matrices
Example #16
 def log(parameter):
     n_matrices = len(parameter) / 9
     matrices = numpy.empty((n_matrices, 4, 4))
     for i in xrange(n_matrices):
         p = parameter[i * 9: (i + 1) * 9].copy()
         matrices[i, :, :] = logm(
             affine_from_parameters(p)
         )
     return matrices
Example #17
 def test_multilog(self):
     A = np.zeros((self.k, self.m, self.m))
     l = np.zeros((self.k, self.m, self.m))
     for i in range(self.k):
         a = np.diag(rnd.rand(self.m))
         q, r = la.qr(rnd.randn(self.m, self.m))
         A[i] = q.dot(a.dot(q.T))
         l[i] = logm(A[i])
     np_testing.assert_allclose(multilog(A, pos_def=True), l)
Example #18
 def test_logm_consistency(self):
     random.seed(1234)
     for dtype in [np.float64, np.complex128]:
         for n in range(1, 10):
             for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
                 # make logm(A) be of a given scale
                 A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
                 if np.iscomplexobj(A):
                     A = A + 1j * random.rand(n, n) * scale
                 assert_array_almost_equal(expm(logm(A)), A)
Example #19
 def test_real_mixed_sign_spectrum(self):
     # These matrices have real eigenvalues with mixed signs.
     # The output logm dtype is complex, regardless of input dtype.
     for M in (
             [[1, 0], [0, -1]],
             [[0, 1], [1, 0]]):
         for dt in float, complex:
             A = np.array(M, dtype=dt)
             A_logm, info = logm(A, disp=False)
             assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
Example #20
File: xfm.py  Project: 0rC0/pyezminc
    def avg(self, xfms):
        """
        Average with an arbitrary number of linear transforms
        :param xfms: another XFM object or list of XFMs
        :return: new XFM object
        """
        self._check_linear()

        if isinstance(xfms, XFM):
            xfms = [xfms]

        R = spl.logm(self.par[0].trans)
        for x in xfms:
            x._check_linear()
            R += spl.logm(x.par[0].trans)
        R /= len(xfms) + 1
        R = spl.expm(R)
        R = sp.real(R)
        return XFM(xfm_matrix=R)
Example #21
def load_audio_data(path):
    f = open(path, 'r')
    parsed = json.load(f)
    loudness = parsed['loudness']
    if args.vector == 'type1':
        return np.array(
            list(map(lambda band: band_to_vec(band, loudness),
                     parsed['bands']))).flatten()
    if args.vector == 'type2':
        cov = np.array(parsed['covariance'])
        log_cov = linalg.logm(cov)
        return log_cov.flatten()
    if args.vector == 'type3':
        means = np.array(
            list(
                map(lambda band: band_to_mean_vec(band, loudness),
                    parsed['bands']))).flatten()
        cov = np.array(parsed['covariance'])
        log_cov = linalg.logm(cov)
        return np.concatenate((means, log_cov.flatten()))
    if args.vector == 'type4':
        means = np.array(
            list(
                map(lambda band: band_to_mean_vec(band, loudness),
                    parsed['bands']))).flatten()
        return means
    if args.vector == 'type5':
        means = np.array(
            list(map(lambda band: band['loudness'] - loudness,
                     parsed['bands']))).flatten()
        return means
    if args.vector == 'type6':
        cov = np.array(parsed['covariance_short'])
        log_cov = linalg.logm(cov)
        return log_cov.flatten()
    if args.vector == 'type7':
        means = np.array(
            list(
                map(lambda band: band_to_mean_vec(band, loudness),
                    parsed['bands_short']))).flatten()
        cov = np.array(parsed['covariance_short'])
        log_cov = linalg.logm(cov)
        return np.concatenate((means, log_cov.flatten()))
Example #22
 def test_logm_consistency(self):
     random.seed(1234)
     for dtype in [np.float64, np.complex128]:
         for n in range(1, 10):
             for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]:
                 # make logm(A) be of a given scale
                 A = (eye(n) + random.rand(n, n) * scale).astype(dtype)
                 if np.iscomplexobj(A):
                     A = A + 1j * random.rand(n, n) * scale
                 assert_array_almost_equal(expm(logm(A)), A)
Example #23
def rel_pose_err(rel1, rel2):
    R1, R2 = rel1[:3, :3], rel2[:3, :3]
    t1, t2 = rel1[:3, 3], rel2[:3, 3]
    t1 = t1 / norm(t1)
    t2 = t2 / norm(t2)
    dot = np.dot(t1, t2)
    dot = np.clip(dot, -1, 1)
    R_err = norm(logm(np.matmul(R1.T, R2)))
    t_err = np.arccos(dot)
    return R_err, t_err
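To exercise this, a short sketch assuming `norm` is `numpy.linalg.norm` and `logm` is `scipy.linalg.logm`, as the snippet implies:

import numpy as np
from numpy.linalg import norm
from scipy.linalg import logm

t = 0.2
Rz = np.array([[np.cos(t), -np.sin(t), 0.0],
               [np.sin(t),  np.cos(t), 0.0],
               [0.0, 0.0, 1.0]])
rel1 = np.eye(4)
rel1[:3, 3] = [1.0, 0.0, 0.0]
rel2 = np.eye(4)
rel2[:3, :3] = Rz
rel2[:3, 3] = [0.0, 1.0, 0.0]
R_err, t_err = rel_pose_err(rel1, rel2)
print(R_err, t_err)   # ~0.283 (= 0.2*sqrt(2)) and ~pi/2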
Example #24
 def test_logm_nearly_singular(self):
     M = np.array([[1e-100]])
     expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter('always')
         L, info = logm(M, disp=False)
         assert_equal(len(w), 1)
         assert_(issubclass(w[-1].category, expected_warning))
         E = expm(L)
         assert_allclose(E, M, atol=1e-14)
Example #25
 def test_real_mixed_sign_spectrum(self):
     # These matrices have real eigenvalues with mixed signs.
     # The output logm dtype is complex, regardless of input dtype.
     for M in (
             [[1, 0], [0, -1]],
             [[0, 1], [1, 0]]):
         for dt in float, complex:
             A = np.array(M, dtype=dt)
             A_logm, info = logm(A, disp=False)
             assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
Example #26
 def get_cont_A_matrix(self):
     A = self.eval_model.sparse_compute_Lambda_at_best_index()
     if self.eval_model.type == 'c':
         A_matrix = A
     elif self.eval_model.type == 'd':
         A_matrix = logm(A) * 1.0 / self.eval_model.dt
     else:
         raise NotImplementedError
     self.cont_A_matrix = A_matrix
     return A_matrix
Example #27
File: helper.py  Project: jiajunxu/Robotics
def get_quat(R):
    W = logm(R)
    w1 = W[2][1]
    w2 = W[0][2]
    w3 = W[1][0]
    w = np.array([[w1], [w2], [w3]])
    a = w / norm(w)
    theta = norm(w)
    q0 = np.cos(theta / 2)
    q123 = a * np.sin(theta / 2)
    return q123[0], q123[1], q123[2], q0
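A sanity-check sketch, assuming `logm` is `scipy.linalg.logm` and `norm` is `numpy.linalg.norm`; a 90-degree rotation about z should give qz ~ q0 ~ 0.707:

import numpy as np
from numpy.linalg import norm
from scipy.linalg import logm

Rz = np.array([[0.0, -1.0, 0.0],
               [1.0,  0.0, 0.0],
               [0.0,  0.0, 1.0]])   # 90 degrees about z
qx, qy, qz, q0 = get_quat(Rz)
print(qx, qy, qz, q0)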
Example #28
 def gradient_log(parameter):
     n_matrices = len(parameter) / 6
     matrices = numpy.empty((n_matrices, 4, 4))
     for i in xrange(n_matrices):
         p = parameter[i * 6:(i + 1) * 6].copy()
         #In the case that it is the gradient, we
         #take the parameter as a distance from the
         #identity, so we add the non-scale to it
         p[0:3] += 1
         matrices[i, :, :] = logm(rigid_from_parameters(p))
     return matrices
    def test_logm_type_conversion_mixed_sign_or_complex_spectrum(self):
        complex_dtype_chars = ('F', 'D', 'G')
        for matrix_as_list in (
                [[1, 0], [0, -1]],
                [[0, 1], [1, 0]],
                [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):

            # check that the spectrum has the expected properties
            W = scipy.linalg.eigvals(matrix_as_list)
            assert_(any(w.imag or w.real < 0 for w in W))

            # check complex->complex
            A = np.array(matrix_as_list, dtype=complex)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)

            # check float->complex
            A = np.array(matrix_as_list, dtype=float)
            A_logm, info = logm(A, disp=False)
            assert_(A_logm.dtype.char in complex_dtype_chars)
Example #30
File: vlad.py  Project: nutszebra/ddp
def geodesicDistanceOnSPD(x, y):
  if len(x.shape)!=1:
    sq = sqrtm(x)
    invsq = pinv2(sq)
    F = np.dot(np.dot(invsq, y), invsq)
    return np.linalg.norm(logm(F))
  else:
    sq = x**0.5
    invsq = 1.0 / sq
    F = invsq * y * invsq
    return np.linalg.norm(np.log(F))
Example #31
File: data.py  Project: BGU-CS-VIL/JA-POLS
def convert_to_logm(T):
    """
    Get an SE transformation given as 6 affine parameters (the top two rows of a 3x3 homogeneous matrix).
    Return its matrix logarithm, flattened to a 6-vector.
    """
    T = np.reshape(T, (2, 3))
    bottom = np.zeros((1, 3))
    bottom[0][2] = 1
    T = np.concatenate((T, bottom), axis=0)
    T_log = logm(T)[0:2, :]
    return T_log.ravel()
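A short sketch with a hypothetical small in-plane rotation plus translation, assuming `logm` is `scipy.linalg.logm`:

import numpy as np
from scipy.linalg import logm

theta = 0.1
T = np.array([np.cos(theta), -np.sin(theta), 0.5,
              np.sin(theta),  np.cos(theta), -0.2])   # top two rows of a homogeneous 3x3
v = convert_to_logm(T)
print(v.shape)   # (6,)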
Example #32
 def test_complex_spectrum_real_logm(self):
     # This matrix has complex eigenvalues and real logm.
     # Its output dtype depends on its input dtype.
     M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
     for dt in float, complex:
         X = np.array(M, dtype=dt)
         w = scipy.linalg.eigvals(X)
         assert_(1e-2 < np.absolute(w.imag).sum())
         Y, info = logm(X, disp=False)
         assert_(np.issubdtype(Y.dtype, np.inexact))
         assert_allclose(expm(Y), X)
Example #33
 def test_complex_spectrum_real_logm(self):
     # This matrix has complex eigenvalues and real logm.
     # Its output dtype depends on its input dtype.
     M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
     for dt in float, complex:
         X = np.array(M, dtype=dt)
         w = scipy.linalg.eigvals(X)
         assert_(1e-2 < np.absolute(w.imag).sum())
         Y, info = logm(X, disp=False)
         assert_(np.issubdtype(Y.dtype, dt))
         assert_allclose(expm(Y), X)
Example #34
 def distance(self, other):
     """Defines a distance function between two positive definite matrices
     
     Arguments:
         other {SymPos} -- The positive definite matrix to calculate the distance
     
     Returns:
         float -- the distance (a positive number)
     """
     C = np.matmul(self._coordinates, np.linalg.inv(other._coordinates))
     return np.linalg.norm(logm(C))
Example #35
File: vlad.py  Project: nutszebra/ddp
def derivativeSquareOfGeodesicOnSPD(x,y):
  if len(x.shape)!=1:
    sq = sqrtm(x)
    invsq = pinv2(sq)
    F = np.dot(np.dot(invsq, y), invsq)
    return 2 * np.dot(np.dot(sq, logm(F)), sq)
  else:
    sq = x**0.5
    invsq = 1.0 / sq
    F = invsq * y * invsq
    return 2*sq*np.log(F)*sq
Example #36
def discrete2continuous(tm, framerate):
    """
    Convert a discrete transition matrix, such as output by HMMs, to continuous transition rate estimates
    """
    assert tm.ndim == 2
    assert tm.shape[0] == tm.shape[1]
    nb_states = tm.shape[0]
    rm = np.eye(nb_states) + framerate * logm(tm)
    rm[rm < 0] = 0.0  # correct small negative values
    rm[np.eye(nb_states, dtype=bool)] -= 1
    return rm
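A round-trip sketch: discretize a hypothetical rate matrix with expm at a given framerate, then recover it:

import numpy as np
from scipy.linalg import expm, logm

framerate = 30.0
rates = np.array([[-1.0, 1.0], [0.5, -0.5]])   # continuous transition rates
tm = expm(rates / framerate)                   # discrete per-frame transition matrix
print(discrete2continuous(tm, framerate))      # ~ rates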
def geodesic_dist(R, R_gt):  # _geo_err
    R, R_gt = map(np.matrix, [R, R_gt])
    # disp=False suppresses the warning and returns the error estimate instead
    _logRR, errest = logm(R.transpose() * R_gt, disp=False)
    R_angle = norm(_logRR, 2) / sqrt(2)
    # With disp=True this would print("logm result may be inaccurate, approximate err =", errest)
    # R_angle  = norm(logm(R.transpose()*R_gt), 2) / sqrt(2)
    #
    # On the difference between numpy/scipy norm and MATLAB norm:
    #  http://stackoverflow.com/questions/26680412/getting-different-answers-with-matlab-and-python-norm-functions
    #  https://nl.mathworks.com/help/matlab/ref/norm.html
    return R_angle
Example #38
def findIK(endT, S, M, theta=None, max_iter=100, max_err=0.001, mu=0.05):
    """
    Basically Inverse Kinematics
    Uses Newton's method to find joint vars to reach a given pose for a given robot. Returns joint positions and
    the error. endT, S, and M should be provided in the space frame. Stopping conditions are when the final pose is within a given
    twist norm of the desired end pose or when a maximum number of iterations is reached.
    TODO: Improve flexibility of input types
    :param endT: the desired end pose of the end effector
    :param S: a python list of 6x1 screw axes in the space frame
    :param M: the pose of the end effector when the robot is at the zero position
    :param theta: Optional - An initial guess of theta. If not provided, zeros are used. Should be a Nx1 numpy matrix
    :param max_iter: Optional - The maximum number of iterations of Newton's method for the error to fall below max_err. Default is 100
    :param max_err: Optional - The maximum error to determine the end of iterations before max_iter is reached. Default is 0.001 and should be good for PL/quizzes
    :param mu: The normalizing coefficient (?) when computing the pseudo-inverse of the jacobian. Default is 0.05
    :returns: A tuple where the first element is an Nx1 numpy array of joint variables where the algorithm ended. Second
              element is the norm of the twist required to take the found pose to the desired pose. Essentially the error that PL checks against.
    """
    if isinstance(S, list):
        S = np.hstack(S)

    if theta is None:
        theta = np.zeros((S.shape[1], 1))
    outMat = []
    max_it = max_iter
    for i in range(2**(S.shape[1])):
        V = 100 * np.ones((6, 1))
        max_iter = max_it
        while np.linalg.norm(V) > max_err and max_iter > 0:
            curr_pose = evalT(S, theta, M)
            V = inv_bracket(logm(endT.dot(inv(curr_pose))))
            J = evalJ(S, theta)
            pinv = inv(J.transpose().dot(J) +
                       mu * np.identity(S.shape[1])).dot(J.transpose())
            thetadot = pinv.dot(V)

            theta = theta + thetadot
            for j in range(theta.size):
                while (theta[j] >= np.pi):
                    theta[j] -= 2 * np.pi
                while (theta[j] < -np.pi):
                    theta[j] += 2 * np.pi
            max_iter -= 1
        if (np.linalg.norm(V) <= max_err):
            theta_cop = theta
            novel = True
            for a in outMat:
                if (np.linalg.norm(theta_cop - a) < 0.1):
                    novel = False
            if (novel):
                outMat.append(theta_cop)
        for k in range(theta.shape[0]):
            theta[k, 0] = float(format(i, '06b')[k]) - 0.5
    return (outMat, np.linalg.norm(V))
Example #39
def rotmat_to_angle_axis(rotMat):
	'''
		Converts a rotation matrix into angle axis format
	'''
	aa = linalg.logm(rotMat)
	aa = (aa - aa.transpose()	)/2.0
	v1,v2,v3 = -aa[1,2], aa[0,2], -aa[0,1]
	v  = np.array((v1,v2,v3))
	theta = np.linalg.norm(v)
	if theta>0:
		v     = v/theta
	return theta, v
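A quick check, assuming `linalg` is `scipy.linalg`; a rotation by 0.3 rad about z should give theta ~ 0.3 and axis ~ (0, 0, 1):

import numpy as np
from scipy import linalg

t = 0.3
Rz = np.array([[np.cos(t), -np.sin(t), 0.0],
               [np.sin(t),  np.cos(t), 0.0],
               [0.0, 0.0, 1.0]])
theta, axis = rotmat_to_angle_axis(Rz)
print(theta, axis)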
Example #40
def get_rot_angle(view1, view2):
	try:
		viewDiff = linalg.logm(np.dot(view2, np.transpose(view1)))
	except:
		print "Error Encountered"
		pdb.set_trace()

	viewDiff = linalg.norm(viewDiff, ord='fro')
	assert not any(np.isnan(viewDiff.flatten()))
	assert not any(np.isinf(viewDiff.flatten()))
	angle    = viewDiff/np.sqrt(2)
	return angle
Example #41
def rotmat_to_angle_axis(rotMat):
    '''
    Converts a rotation matrix into angle axis format
    '''
    aa = linalg.logm(rotMat)
    aa = (aa - aa.transpose()) / 2.0
    v1, v2, v3 = -aa[1, 2], aa[0, 2], -aa[0, 1]
    v = np.array((v1, v2, v3))
    theta = np.linalg.norm(v)
    if theta > 0:
        v = v / theta
    return theta, v
Example #42
 def matrix_average(
     self,
     matrices: List[np.ndarray],
     weights: Optional[np.ndarray] = None,
 ):
     if weights is None:
         num_matrices = len(matrices)
         weights = num_matrices * (1 / num_matrices, )
     logs = [w * logm(A) for (w, A) in zip(weights, matrices)]
     logs = np.array(logs)
     logs_sum = logs.sum(axis=0)
     return expm(logs_sum)
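The method ignores `self`, so the weighted log-Euclidean mean it computes can be sketched standalone:

import numpy as np
from scipy.linalg import expm, logm

def log_euclidean_mean(mats, weights=None):
    # expm of the (weighted) arithmetic mean of the matrix logarithms
    if weights is None:
        weights = np.full(len(mats), 1.0 / len(mats))
    return expm(sum(w * logm(A) for w, A in zip(weights, mats)))

A = np.diag([1.0, 4.0])
B = np.diag([4.0, 1.0])
print(log_euclidean_mean([A, B]))   # diag(2, 2): entrywise geometric mean for these diagonals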
Example #43
def covd_parallel(
        node, data, row_idx,
        col_idx):  # returns the vec of the logarithm of the cov matrix
    mask = row_idx == node  # find nearest neighbors
    cluster = np.append(
        node, col_idx[mask]
    )  # define the local cluster, its size depends on the local connectivity
    C = np.cov(data[cluster, :], rowvar=False)
    L = linalg.logm(C)
    iu1 = np.triu_indices(L.shape[1])
    vec = L[iu1]
    return (node, vec)
def qcbOptimalDecomposition(r1, r2, theta, s=1):
    """
    Experimenting to find optimal reverse chernoff quantity
    """
    rho = fmp(np.matrix([[1 / 2, 0], [0, 1 / 2]]), s)
    sigma = fmp(
        np.matrix([[1 / 2, 2 * r2 * np.sin(theta)],
                   [2 * r2 * np.sin(theta), 1 / 2]]), s)
    tau = fmp(
        np.matrix([[1 + 4 * r1 * np.cos(theta), 0],
                   [0, 1 - 4 * r1 * np.cos(theta)]]), 1 - s)
    return np.trace(logm(np.matmul(np.matmul(tau, sigma), tau)))
Example #45
File: flows.py  Project: changyi7231/MEF
    def forward(self, x, reverse=False, init=False):
        if init:
            if self.conv_type == 'matrixexp':
                rand = special_ortho_group.rvs(self.in_channels)
                rand = logm(rand)
                rand = torch.from_numpy(rand.real)
                self.weight.data.copy_(rand)
            elif self.conv_type == 'standard':
                nn.init.orthogonal_(self.weight)
            elif self.conv_type == 'decomposition':
                w = ortho_group.rvs(self.in_channels)
                p, l, u = scipy.linalg.lu(w)
                self.p.copy_(torch.from_numpy(p))
                self.l.data.copy_(torch.from_numpy(l))
                self.u.data.copy_(torch.from_numpy(u))
            else:
                raise ValueError('wrong 1x1 convolution type')

        if not reverse:
            if self.conv_type == 'matrixexp':
                weight = expm(self.weight)
                x = F.conv2d(x, weight.view(self.in_channels, self.in_channels, 1, 1))
                log_det = torch.diagonal(self.weight).sum().mul(x.size(2) * x.size(3))
            elif self.conv_type == 'standard':
                x = F.conv2d(x, self.weight.view(self.in_channels, self.in_channels, 1, 1))
                _, log_det = torch.slogdet(self.weight)
                log_det = log_det.mul(x.size(2) * x.size(3))
            elif self.conv_type == 'decomposition':
                l = self.l * self.l_mask + self.identity
                u = self.u * self.u_mask
                weight = torch.matmul(self.p, torch.matmul(l, u))
                x = F.conv2d(x, weight.view(self.in_channels, self.in_channels, 1, 1))
                log_det = torch.diagonal(self.u).abs().log().sum().mul(x.size(2) * x.size(3))
            else:
                raise ValueError('wrong 1x1 convolution type')
        else:
            if self.conv_type == 'matrixexp':
                weight = expm(-self.weight)
                x = F.conv2d(x, weight.view(self.in_channels, self.in_channels, 1, 1))
                log_det = torch.diagonal(self.weight).sum().mul(x.size(2) * x.size(3)).mul(-1)
            elif self.conv_type == 'standard':
                x = F.conv2d(x, torch.inverse(self.weight).view(self.in_channels, self.in_channels, 1, 1))
                _, log_det = torch.slogdet(self.weight)
                log_det = log_det.mul(x.size(2) * x.size(3)).mul(-1)
            elif self.conv_type == 'decomposition':
                l = self.l * self.l_mask + self.identity
                u = self.u * self.u_mask
                weight = torch.matmul(self.p, torch.matmul(l, u))
                x = F.conv2d(x, torch.inverse(weight).view(self.in_channels, self.in_channels, 1, 1))
                log_det = torch.diagonal(self.u).sum().mul(x.size(2) * x.size(3)).mul(-1)
            else:
                raise ValueError('wrong 1x1 convolution type')
        return x, log_det
    def mean_of_resampled_particles(self, particles):
        '''
        :param particles: pandas df of resampled particles (all with the same weight)
        :return: mean of rotation matrix
        '''
        rotmats = np.zeros((len(particles), 3, 3))
        for i in range(len(particles)):
            rotmats[i] = sp.logm(particles['Rotation'].as_matrix()[i])
        liemean = sum(rotmats) / len(particles)
        mean = sp.expm(liemean)

        return mean
Example #47
    def generator(self, t=1.0):
        """ Compute the generator of a transition matrix

        :param t: the timescale parameter
        :type t: float

        :Example:

        G = A.generator()
        """
        generator = logm(self) / t
        return generator
def interpolate(transformation, nframes=50):
    """Returns a list of transformations that interpolates between the
    identity transformation and the specified transformation. If 'nframes'
    equals 1, then simply return the input transformation in a singleton
    list."""
    if nframes == 1:
        return [transformation]
    if dual_matrix_determinant(transformation)[0,0] <= 0:
        raise DeterminantNegativeException
    log_transformation = logm(transformation)
    return [expm(log_transformation * i/nframes).real
            for i in range(nframes)]
Example #49
def get_rot_angle(view1, view2):
    try:
        viewDiff = linalg.logm(np.dot(view2, np.transpose(view1)))
    except:
        print "Error Encountered"
        pdb.set_trace()

    viewDiff = linalg.norm(viewDiff, ord='fro')
    assert not any(np.isnan(viewDiff.flatten()))
    assert not any(np.isinf(viewDiff.flatten()))
    angle = viewDiff / np.sqrt(2)
    return angle
Example #50
def calc_rt_dist_m(pose_src, pose_tgt):
    R_src = pose_src[:, :3]
    T_src = pose_src[:, 3]
    R_tgt = pose_tgt[:, :3]
    T_tgt = pose_tgt[:, 3]
    temp = logm(np.dot(np.transpose(R_src), R_tgt))
    rd_rad = LA.norm(temp, 'fro') / np.sqrt(2)
    rd_deg = rd_rad / np.pi * 180

    td = LA.norm(T_tgt - T_src)

    return rd_deg, td
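A short sketch, assuming `LA` is `numpy.linalg` and `logm` is `scipy.linalg.logm`:

import numpy as np
from numpy import linalg as LA
from scipy.linalg import logm

t = np.deg2rad(10.0)
Rz = np.array([[np.cos(t), -np.sin(t), 0.0],
               [np.sin(t),  np.cos(t), 0.0],
               [0.0, 0.0, 1.0]])
pose_src = np.hstack([np.eye(3), np.zeros((3, 1))])
pose_tgt = np.hstack([Rz, np.array([[0.1], [0.0], [0.0]])])
print(calc_rt_dist_m(pose_src, pose_tgt))   # ~ (10.0, 0.1)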
def apply_log_vect(M):
    d = M.shape[0]
    n = d * (d + 1) // 2  # integer division so np.zeros gets an int shape
    V = np.zeros((n, 1))
    offset = 0.001 * np.eye(d, d)
    true_mat = np.ones((d, d))
    true_mat = true_mat.astype(np.int64)
    in_triu = np.triu(true_mat)
    logM = np.real(lina.logm(M + offset))
    V = logM[in_triu == 1]
    V = np.array([V])
    return V
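A usage sketch, assuming `lina` is `scipy.linalg`:

import numpy as np
from scipy import linalg as lina

rng = np.random.default_rng(0)
C = np.cov(rng.normal(size=(100, 3)), rowvar=False)   # 3x3 covariance matrix
print(apply_log_vect(C).shape)   # (1, 6): upper triangle of logm(C + offset)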
Example #52
 def test_logm_exactly_singular(self):
     A = np.array([[0, 0], [1j, 1j]])
     B = np.asarray([[1, 1], [0, 0]])
     for M in A, A.T, B, B.T:
         expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
         with warnings.catch_warnings(record=True) as w:
             warnings.simplefilter('always')
             L, info = logm(M, disp=False)
             assert_equal(len(w), 1)
             assert_(issubclass(w[-1].category, expected_warning))
             E = expm(L)
             assert_allclose(E, M, atol=1e-14)
Example #53
 def forward(ctx, mat):
     from scipy.linalg import logm
     if mat.requires_grad:
         ctx.save_for_backward(mat)
     device = mat.device
     input_complex = mat.is_complex()
     mat = mat.cpu().numpy()
     mat = logm(mat)
     mat = torch.as_tensor(mat, device=device)
     if not input_complex and mat.is_complex():
         mat = mat.real
     return mat
Example #54
def haar_init_(A):
    """ Haar initialization on SO(n) """
    torch.nn.init.orthogonal_(A)
    with torch.no_grad():
        if A.det() < 0.:
            # Go bijectively from O^-(n) to O^+(n) \iso SO(n)
            idx = np.random.randint(0, A.size(0))
            A[idx] *= -1.
        An = la.logm(A.data.cpu().numpy()).real
        An = .5 * (An - An.T)
        A.copy_(torch.tensor(An))
        return A
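A usage sketch, assuming `la` is `scipy.linalg` and `np` is NumPy as the snippet implies; the buffer ends up holding a skew-symmetric element of so(n):

import numpy as np
import torch
from scipy import linalg as la

A = torch.empty(4, 4)
haar_init_(A)
print(torch.allclose(A, -A.t()))   # True: A is skew-symmetric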
Example #55
def J_r_for_sync(*, poses_init, poses_est, pose_links, Z, cal_jacobian):
    J = np.zeros((6 + 6 * len(Z), 6 * len(poses_init)))
    r = np.zeros((6 + 6 * len(Z), 1))
    r[0:6] = Lie.vee(logm(np.linalg.inv(poses_init[0]).dot(poses_est[0])))
    J[0:6, 0:6] = Lie.RightJacobianInverse_SE3(r[0:6])
    for i, (a, b) in enumerate(pose_links):
        # Measurement is T_ab = T_a.inv() * T_b
        # ||J*dx + h(x) - z||_2
        # r = h(x) - z = vee(log(T_a.inv()*T_b)) - vee(log(T_ab))
        # r = vee(log(T_a.inv()*T_b)) - vee(log(T_ab))
        # r = vee(log(T_ba * (T_a.inv()*T_b)))
        res_idx = 6 * (i + 1)

        R_ab_z = Z[i][:3, :3]
        t_ab_z = Z[i][:3, [3]]
        R_ba_z = R_ab_z.transpose()
        t_ba_z = -R_ab_z.transpose().dot(t_ab_z)
        T_a = poses_est[a]
        T_b = poses_est[b]
        R_a = T_a[:3, :3]
        R_b = T_b[:3, :3]
        t_a = T_a[0:3, [3]]
        t_b = T_b[0:3, [3]]
        R_ab = R_a.transpose().dot(R_b)
        tb_minus_ta = t_b - t_a
        t_ab = R_a.transpose().dot(tb_minus_ta)

        # res = Lie.vee(np.linalg.inv(Z[i]).dot(
        #     np.linalg.inv(poses_est[a]).dot(poses_est[b])))
        r_T = np.zeros((4, 4))
        r_T[3, 3] = 1
        r_T[:3, :3] = R_ba_z.dot(R_a.transpose()).dot(R_b)
        r_T[:3, [3]] = R_ba_z.dot(R_a.transpose()).dot(tb_minus_ta) + t_ba_z
        res = Lie.vee(r_T)

        r[res_idx:res_idx + 6] = res
        if not cal_jacobian:
            continue
        # Measurement is T_ab = T_a.inv() * T_b
        T_ba = np.zeros((4, 4))
        T_ba[3, 3] = 1
        T_ba[:3, :3] = R_b.transpose().dot(R_a)
        T_ba[:3, [3]] = R_b.transpose().dot(t_a - t_b)
        J[res_idx:res_idx + 6, a * 6:(a + 1) * 6] = - \
            Lie.RightJacobianInverse_SE3(res).dot(Lie.Adjoint_SE3(T_ba))
        J[res_idx:res_idx + 6,
          b * 6:(b + 1) * 6] = Lie.RightJacobianInverse_SE3(res)
        # solve normal equations
    if not cal_jacobian:
        return r
    else:
        return (J, r)
Example #56
 def gradient_log(parameter):
     n_matrices = len(parameter) / 6
     matrices = numpy.empty((n_matrices, 4, 4))
     for i in xrange(n_matrices):
         p = parameter[i * 6: (i + 1) * 6].copy()
         #In the case that it is the gradient, we
         #take the parameter as a distance from the
         #identity, so we add the non-scale to it
         p[0: 3] += 1
         matrices[i, :, :] = logm(
             rigid_from_parameters(p)
         )
     return matrices
def apply_log_vect(M):

    d = M.shape[0]
    n = d*(d+1)//2  # integer division so np.zeros gets an int shape
    V = np.zeros((n,1))
    offset = 0.001*np.eye(d,d)
    true_mat = np.ones((d,d))
    true_mat = true_mat.astype(np.int64)
    in_triu = np.triu(true_mat)
    logM = np.real(lina.logm(M+offset))
    V = logM[in_triu==1]
    V = np.array([V])
    return V
Example #58
def discrete2cont(sys, dt, method='zoh', alpha=None):
    sys = LinearSystem(sys)
    if sys.analog:
        raise ValueError("system (%s) is already continuous" % sys)

    if dt <= 0:
        raise ValueError("dt (%s) must be positive" % (dt,))

    ad, bd, cd, dd = sys.ss
    n = ad.shape[0]
    m = n + bd.shape[1]

    if method == 'gbt':
        if alpha is None or alpha < 0 or alpha > 1:
            raise ValueError("alpha (%s) must be in range [0, 1]" % (alpha,))

        I = np.eye(n)
        ar = solve(alpha*dt*ad.T + (1-alpha)*dt*I, ad.T - I).T
        M = I - alpha*dt*ar

        br = np.dot(M, bd) / dt
        cr = np.dot(cd, M)
        dr = dd - alpha*np.dot(cr, bd)

    elif method in ('bilinear', 'tustin'):
        return discrete2cont(sys, dt, method='gbt', alpha=0.5)

    elif method in ('euler', 'forward_diff'):
        return discrete2cont(sys, dt, method='gbt', alpha=0.0)

    elif method == 'backward_diff':
        return discrete2cont(sys, dt, method='gbt', alpha=1.0)

    elif method == 'zoh':
        M = np.zeros((m, m))
        M[:n, :n] = ad
        M[:n, n:] = bd
        M[n:, n:] = np.eye(bd.shape[1])
        E = linalg.logm(M) / dt

        ar = E[:n, :n]
        br = E[:n, n:]
        cr = cd
        dr = dd

    else:
        raise ValueError("invalid method: '%s'" % (method,))

    return LinearSystem((ar, br, cr, dr), analog=True)
Example #59
    def orientation_error(self, q_a, q_b):
        '''Error from q_a to q_b
        '''
        M_a = transformations.quaternion_matrix(q_a)[:3, :3]
        M_b = transformations.quaternion_matrix(q_b)[:3, :3]
        error_matrix = M_b.dot(np.transpose(M_a))
        try:
            lie_alg_error = np.real(linalg.logm(error_matrix))
        except Exception:
            # We get an Exception("Internal Inconsistency") when the error is zero
            # No error!
            return np.array([0.0, 0.0, 0.0])

        angle_axis = sub8_utils.deskew(lie_alg_error)
        assert np.linalg.norm(angle_axis) < (2 * np.pi) + 0.01, "uh-oh, unnormalized {}".format(angle_axis)
        return angle_axis