Example no. 1
 def predict(self, x: np.ndarray, labels_n: Optional[int] = None) -> list:
     log_probs = self.pi + x.dot(self.theta.transpose())
     scaled_log_probs = self.scale(log_probs)
     # Sort scores in descending order and keep the labels_n best (None keeps all).
     ordered_idxs = scaled_log_probs.argsort()[::-1][:labels_n]
     labels_and_log_probs = list(zip(self.labels[ordered_idxs],
                                     scaled_log_probs[ordered_idxs]))
     return labels_and_log_probs
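A minimal standalone sketch of the ranking step above: argsort in descending order, truncated to the top k. The names below are illustrative, not taken from the original class.

import numpy as np

def top_k_labels(scores: np.ndarray, labels: np.ndarray, k=None) -> list:
    # argsort ascending, reverse for descending; the slice keeps the k best (k=None keeps all).
    order = scores.argsort()[::-1][:k]
    return list(zip(labels[order], scores[order]))

# top_k_labels(np.array([0.1, 0.7, 0.2]), np.array(['a', 'b', 'c']), 2)
# -> [('b', 0.7), ('c', 0.2)]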
Example no. 2
    def back_propagation(self, delta: np.ndarray, reg_lambda):
        """
        back propagation on a layer level
        """
        m = delta.shape[1]
        n_inputs = self.theta.shape[1]

        self.input_delta = delta

        if (self.output_delta is None) or (self.output_delta.shape != (n_inputs-1, m)):
            self.output_delta = np.zeros((n_inputs-1, m))  # pre-allocate memory
        a = self.input_a[1:, :]  # g'(z) = g(z)*(1-g(z)) = a*(1-a)
        self.output_delta[:] = self.theta.T[1:, :].dot(self.input_delta) * a * (1 - a)  # exclude the bias row of theta

        if (self.d is None) or (self.d.shape != self.theta.shape):
            self.d = np.zeros_like(self.theta)  # pre-allocate memory
        self.d[:] = delta.dot(self.input_a.T)

        assert self.grad.shape == self.theta.shape  # gradient entry must be set before training
        self.grad[:] = (1/m)*self.d
        self.grad[:, 1:] += (reg_lambda/m) * self.theta[:, 1:]  # regularization; do not regularize the bias units

        return self.output_delta
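For reference, the same delta rule in isolation, assuming a sigmoid activation (so g'(z) = a * (1 - a)) and a weight matrix theta whose first column multiplies the bias unit; the names are illustrative.

import numpy as np

def backprop_delta(theta: np.ndarray, delta: np.ndarray, a: np.ndarray) -> np.ndarray:
    # theta: (n_out, n_in + 1); delta: (n_out, m); a: (n_in, m) activations.
    # Dropping the bias row of theta.T makes the returned delta match the layer inputs.
    return theta.T[1:, :].dot(delta) * a * (1 - a)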
Example no. 3
 def derivative_J_sgd(theta: np.ndarray, X_b_i: np.ndarray, y_i):
     """
     Compute the stochastic search direction (the gradient for a single sample).
     """
     return X_b_i.T.dot(X_b_i.dot(theta) - y_i) * 2.
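A sketch of how this gradient might drive a stochastic gradient descent loop; eta, n_iters, and the random sampling scheme are assumptions, not taken from the original code.

import numpy as np

def sgd(X_b: np.ndarray, y: np.ndarray, initial_theta: np.ndarray,
        eta: float = 0.01, n_iters: int = 10000) -> np.ndarray:
    theta = initial_theta.copy()
    for _ in range(n_iters):
        i = np.random.randint(len(X_b))          # one random sample per step
        theta = theta - eta * derivative_J_sgd(theta, X_b[i], y[i])
    return theta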
Example no. 4
 def transform(self, transf_matrix: np.ndarray) -> None:
     """
     Applies a homogeneous transform.
     :param transf_matrix: <np.float: 4, 4>. Homogeneous transformation matrix.
     """
     self.points[:3, :] = transf_matrix.dot(np.vstack((self.points[:3, :], np.ones(self.nbr_points()))))[:3, :]
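A small helper, given as an assumption about how such a matrix might be built: a 4x4 homogeneous transform composed from a 3x3 rotation R and a translation t.

import numpy as np

def make_homogeneous(R: np.ndarray, t: np.ndarray) -> np.ndarray:
    # Top-left 3x3 block rotates, last column translates, bottom row is [0, 0, 0, 1].
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T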
Example no. 5
def MSVE(w: np.ndarray, v_star: np.ndarray, X: np.ndarray, db: np.ndarray):
    v_hat = X.dot(w)
    err = v_hat - v_star
    return np.square(err).dot(db)
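MSVE here is the squared value error weighted by a state distribution db. A toy check with three one-hot states (all numbers below are illustrative):

import numpy as np

X = np.eye(3)                     # one-hot features
w = np.array([1.0, 2.0, 3.0])     # weights, so v_hat = [1, 2, 3]
v_star = np.array([1.0, 1.0, 1.0])
db = np.array([0.5, 0.25, 0.25])  # on-policy state distribution
# MSVE(w, v_star, X, db) == 0.5*0 + 0.25*1 + 0.25*4 == 1.25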
Example no. 6
 def calculate_net(self, image_vec: np.ndarray):
     return image_vec.dot(self.weights) + self.biases
Example no. 7
def rgb_to_yCbCr(img: np.ndarray) -> np.ndarray:
    img = img.astype(np.float64)  # np.float is a deprecated alias; use an explicit dtype
    out = img.dot(_rgb_yuv_conv)
    out[:, :, (1, 2)] += 128
    return out.astype(np.uint8)
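The conversion matrix _rgb_yuv_conv is not shown in the snippet. One plausible definition, assuming full-range ITU-R BT.601 coefficients and the img.dot(_rgb_yuv_conv) layout above (rows indexed by R, G, B; columns by Y, Cb, Cr):

import numpy as np

# Assumed BT.601 full-range coefficients; not taken from the original source.
_rgb_yuv_conv = np.array([[0.299, -0.168736,  0.5     ],
                          [0.587, -0.331264, -0.418688],
                          [0.114,  0.5,      -0.081312]])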
Example no. 8
def optimize_svm(
        kernel_matrix: np.ndarray,
        y: np.ndarray,
        scaling: Optional[float] = None,
        maxiter: int = 500,
        show_progress: bool = False,
        lambda2: float = 0.001) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Solves the quadratic programming problem for an SVM; some of the constraints are therefore fixed.

    Args:
        kernel_matrix: NxN array
        y: Nx1 array
        scaling: the scaling factor to renormalize the `y`, if it is None,
                 use L2-norm of `y` for normalization
        maxiter: number of iterations for QP solver
        show_progress: showing the progress of QP solver
        lambda2: L2 Norm regularization factor

    Returns:
        np.ndarray: the support-vector coefficients ``alpha``, flattened
        np.ndarray: the bias ``b``, flattened
        np.ndarray: boolean mask marking which entries of ``alpha`` are supports

    Raises:
        MissingOptionalLibraryError: If cvxpy is not installed
    """
    # pylint: disable=invalid-name, unused-argument
    try:
        import cvxpy
    except ImportError as ex:
        raise MissingOptionalLibraryError(
            libname='CVXPY',
            name='optimize_svm',
            pip_install="pip install 'qiskit-aqua[cvx]'",
            msg=str(ex)) from ex

    if y.ndim == 1:
        y = y[:, np.newaxis]
    H = np.outer(y, y) * kernel_matrix
    f = -np.ones(y.shape)
    if scaling is None:
        scaling = np.sum(np.sqrt(f * f))
    f /= scaling

    tolerance = 1e-2
    n = kernel_matrix.shape[1]

    P = np.array(H)
    q = np.array(f)
    G = -np.eye(n)
    I = np.eye(n)
    h = np.zeros(n)
    A = y.reshape(y.T.shape)
    b = np.zeros((1, 1))
    x = cvxpy.Variable(n)
    prob = cvxpy.Problem(
        cvxpy.Minimize((1 / 2) * cvxpy.quad_form(x, P) + q.T @ x +
                       lambda2 * cvxpy.quad_form(x, I)),
        [G @ x <= h, A @ x == b])
    prob.solve(verbose=show_progress, qcp=True)
    result = np.asarray(x.value).reshape((n, 1))
    alpha = result * scaling
    avg_y = np.sum(y)
    avg_mat = (alpha * y).T.dot(kernel_matrix.dot(np.ones(y.shape)))
    b = (avg_y - avg_mat) / n

    support = alpha > tolerance
    logger.debug('Solving QP problem is completed.')
    return alpha.flatten(), b.flatten(), support.flatten()
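A hypothetical call on a toy dataset with a linear kernel; cvxpy must be installed for the solve to run, so the call itself is left commented out.

import numpy as np

X = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0], [1.0, 0.0]])
y = np.array([-1.0, -1.0, 1.0, 1.0])
kernel_matrix = X.dot(X.T)               # linear kernel Gram matrix
# alpha, b, support = optimize_svm(kernel_matrix, y)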
Example no. 9
def _TransformCoordinates(mat:np.ndarray, vec:np.ndarray) -> np.ndarray:
    return mat.dot(vec)
Example no. 10
 def loss(self, w: np.ndarray, *args) -> float:
     """Calculate loss function."""
     X, y = args
     return (0.5 * (self._q * (w - self._m)).dot(w - self._m) +
             np.log(1 + np.exp(-y * w.dot(X.T))).sum())
Example no. 11
def log_likelihood(x: np.ndarray, y: np.ndarray,
                   kernel: 'function'):  # noqa: F821
    k00 = calc_kernel_matrix(x, kernel)
    k00_inv = np.linalg.inv(k00)
    return -(np.linalg.slogdet(k00)[1] + y.dot(k00_inv.dot(y)))
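calc_kernel_matrix is assumed to build the Gram matrix K[i, j] = kernel(x[i], x[j]); a minimal sketch under that assumption, with an RBF kernel shown only for concreteness:

import numpy as np

def calc_kernel_matrix(x: np.ndarray, kernel) -> np.ndarray:
    # Pairwise evaluation of the kernel over all sample pairs.
    return np.array([[kernel(xi, xj) for xj in x] for xi in x])

def rbf(a, b, length_scale: float = 1.0):
    return np.exp(-np.sum((a - b) ** 2) / (2 * length_scale ** 2))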
Example no. 12
 def predictor(parameters: np.ndarray, inputs: np.ndarray) -> np.ndarray:
     return inputs.dot(parameters)
Example no. 13
def commutator_numpy(a: ndarray, b: ndarray):
    return a.dot(b) - b.dot(a)
Example no. 14
 def vol_val_xyz(self, vol: numpy.ndarray, aff: numpy.ndarray, val: float) -> numpy.ndarray:
     vox_idx = numpy.argwhere(vol == val)
     xyz = aff.dot(numpy.c_[vox_idx, numpy.ones(vox_idx.shape[0])].T)[:3].T
     return xyz
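For context, a hypothetical affine aff that scales voxel indices by 2 mm and shifts the origin by -10 mm on each axis, in the 4x4 form vol_val_xyz expects:

import numpy

# Illustrative voxel-to-world affine (values are assumptions, not from the source).
aff = numpy.array([[2.0, 0.0, 0.0, -10.0],
                   [0.0, 2.0, 0.0, -10.0],
                   [0.0, 0.0, 2.0, -10.0],
                   [0.0, 0.0, 0.0,   1.0]])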
Example no. 15
def rotate_so3(points: np.ndarray, theta: np.ndarray) -> np.ndarray:
    return points.dot(rotation_matrix_so3(theta).T)
Example no. 16
def random_rotate_so3(points: np.ndarray,
                      theta_min: float = -np.pi,
                      theta_max: float = np.pi) -> np.ndarray:
    return points.dot(
        rotation_matrix_so3(np.random.uniform(theta_min, theta_max, 3)).T)
Example no. 17
 def grad(self, w: np.ndarray, *args) -> np.ndarray:
     """Calculate gradient."""
     X, y = args
     return self._q * (w - self._m) + (-1) * ((
         (y * X.T) / (1.0 + np.exp(y * w.dot(X.T)))).T).sum(axis=0)
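A generic sanity check, not from the original code: compare grad() against central finite differences of loss(). The model/lambda names in the usage comment are hypothetical.

import numpy as np

def numeric_grad(f, w: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    # Central finite differences, one coordinate at a time.
    g = np.zeros_like(w)
    for i in range(w.size):
        step = np.zeros_like(w)
        step[i] = eps
        g[i] = (f(w + step) - f(w - step)) / (2 * eps)
    return g

# e.g. numeric_grad(lambda w_: model.loss(w_, X, y), w) should be close to model.grad(w, X, y)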
Example no. 18
def bidiagonalize_real_matrix_pair_with_symmetric_products(
        mat1: np.ndarray,
        mat2: np.ndarray,
        tolerance: Tolerance = Tolerance.DEFAULT
) -> Tuple[np.ndarray, np.ndarray]:
    """Finds orthogonal matrices that diagonalize both mat1 and mat2.

    Requires mat1 and mat2 to be real.
    Requires mat1.T @ mat2 to be symmetric.
    Requires mat1 @ mat2.T to be symmetric.

    Args:
        mat1: One of the real matrices.
        mat2: The other real matrix.
        tolerance: Numeric error thresholds.

    Returns:
        A tuple (L, R) of two orthogonal matrices, such that both L @ mat1 @ R
        and L @ mat2 @ R are diagonal matrices.

    Raises:
        ValueError: Matrices don't meet preconditions (e.g. not real).
        ArithmeticError: Failed to meet specified tolerance.
    """

    if np.any(np.imag(mat1) != 0):
        raise ValueError('mat1 must be real.')
    if np.any(np.imag(mat2) != 0):
        raise ValueError('mat2 must be real.')
    if not predicates.is_hermitian(mat1.dot(mat2.T), tolerance):
        raise ValueError('mat1 @ mat2.T must be symmetric.')
    if not predicates.is_hermitian(mat1.T.dot(mat2), tolerance):
        raise ValueError('mat1.T @ mat2 must be symmetric.')

    # Use SVD to bi-diagonalize the first matrix.
    base_left, base_diag, base_right = _svd_handling_empty(np.real(mat1))
    base_diag = np.diag(base_diag)

    # Determine where we switch between diagonalization-fixup strategies.
    dim = base_diag.shape[0]
    rank = dim
    while rank > 0 and tolerance.all_near_zero(base_diag[rank - 1, rank - 1]):
        rank -= 1
    base_diag = base_diag[:rank, :rank]

    # Try diagonalizing the second matrix with the same factors as the first.
    semi_corrected = base_left.T.dot(np.real(mat2)).dot(base_right.T)

    # Fix up the part of the second matrix's diagonalization that's matched
    # against non-zero diagonal entries in the first matrix's diagonalization
    # by performing simultaneous diagonalization.
    overlap = semi_corrected[:rank, :rank]
    overlap_adjust = diagonalize_real_symmetric_and_sorted_diagonal_matrices(
        overlap, base_diag, tolerance)

    # Fix up the part of the second matrix's diagonalization that's matched
    # against zeros in the first matrix's diagonalization by performing an SVD.
    extra = semi_corrected[rank:, rank:]
    extra_left_adjust, _, extra_right_adjust = _svd_handling_empty(extra)

    # Merge the fixup factors into the initial diagonalization.
    left_adjust = combinators.block_diag(overlap_adjust, extra_left_adjust)
    right_adjust = combinators.block_diag(overlap_adjust.T,
                                          extra_right_adjust)
    left = left_adjust.T.dot(base_left.T)
    right = base_right.T.dot(right_adjust.T)

    # Check acceptability vs tolerances.
    if any(not predicates.is_diagonal(left.dot(mat).dot(right), tolerance)
           for mat in [mat1, mat2]):
        raise ArithmeticError('Failed to diagonalize to specified tolerance.')

    return left, right
Example no. 19
 def predict_proba(self, X: np.ndarray) -> np.ndarray:
     """Predict extected probability by the expected coefficient."""
     return sigmoid(X.dot(self._m))
Example no. 20
def matrix_multiply(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """
    Multiply matrices a x b. This is O(n^3).
    """
    return a.dot(b)
Example no. 21
 def predict_proba_with_sampling(self, X: np.ndarray) -> np.ndarray:
     """Predict extected probability by the sampled coefficient."""
     return sigmoid(X.dot(self.sample()))
Example no. 22
def yCbCr_to_rgb(yuv: np.ndarray) -> np.ndarray:
    yuv = yuv.astype(np.float64)  # np.float is a deprecated alias; use an explicit dtype
    yuv[:, :, (1, 2)] -= 128
    out = yuv.dot(_yuv_rgb_conv)
    return out.astype(np.uint8)
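Assuming _rgb_yuv_conv is the BT.601 matrix sketched after Example no. 7 (repeated here for self-containment), a consistent choice for the inverse table is simply its matrix inverse:

import numpy as np

_rgb_yuv_conv = np.array([[0.299, -0.168736,  0.5     ],
                          [0.587, -0.331264, -0.418688],
                          [0.114,  0.5,      -0.081312]])
_yuv_rgb_conv = np.linalg.inv(_rgb_yuv_conv)  # exact inverse of the assumed forward matrix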
Example no. 23
def _two_view_rotation_inliers(
    b1: np.ndarray, b2: np.ndarray, R: np.ndarray, threshold: float
) -> List[int]:
    br2 = R.dot(b2.T).T
    ok = np.linalg.norm(br2 - b1, axis=1) < threshold
    return np.nonzero(ok)[0]
Example no. 24
 def translate(self, matrix: np.ndarray):
     self.vertices = matrix.dot(self.vertices)  # a plain matrix product; encoding a translation this way assumes homogeneous vertex coordinates
Example no. 25
 def apply(self,
           a: np.ndarray,
           b: np.ndarray) -> np.ndarray:
     return a.dot(b)
Example no. 26
def estimate(
    data: pd.DataFrame,
    y: np.ndarray,
    x: np.ndarray,
    categorical_controls: List,
    check_rank=False,
    estimate_variance=False,
    get_residual=False,
    cluster=None,
    tol=None,
    within_if_fe=True,
):
    """ Automatically picks best method for least squares. y must be 2d. """
    if not y.ndim == 2:
        raise ValueError
    # Use within estimator even when more than one set of fixed effects

    if categorical_controls is None or len(categorical_controls) == 0:
        b = np.linalg.lstsq(x, y, rcond=None)[0]
        assert b.ndim == 2
        if estimate_variance or get_residual:
            error = y - x.dot(b)
            assert error.shape == y.shape
    # within estimator
    elif len(categorical_controls) == 1 or within_if_fe:
        if len(categorical_controls) > 1:
            dummies = sps.hstack([
                make_dummies(data[col], True)
                for col in categorical_controls[1:]
            ])
            x = np.hstack((x, dummies.A))

        x_df = pd.DataFrame(
            data=np.hstack((data[categorical_controls[0]].values[:, None], x)),
            columns=list(range(x.shape[1] + 1)),
        )
        pandas_grouped = x_df.groupby(0)
        x_demeaned = (x - pandas_grouped[list(range(
            1, x_df.shape[1]))].transform(np.mean).values)
        assert x_demeaned.shape == x.shape

        if check_rank:
            if tol is not None:
                _, not_collinear = find_collinear_cols(x_demeaned,
                                                       verbose=True,
                                                       tol=tol)
            else:
                _, not_collinear = find_collinear_cols(x_demeaned,
                                                       verbose=True)

            not_collinear = np.array(not_collinear)
            x = x[:, not_collinear]
            x_demeaned = x_demeaned[:, not_collinear]

        # k x n_outcomes
        b = np.linalg.lstsq(x_demeaned, y, rcond=None)[0]
        assert b.ndim == 2
        error = y - x.dot(b)
        assert error.shape == y.shape
        error_df = pd.DataFrame(
            data=np.hstack(
                (data[categorical_controls[0]].values[:, None], error)),
            columns=list(range(error.shape[1] + 1)),
        )
        pandas_grouped = error_df.groupby(0)
        # n_teachers x n_outcomes
        fixed_effects = pandas_grouped[list(range(
            1, error_df.shape[1]))].mean().values
        assert fixed_effects.ndim == 2
        # (n_teachers + k) x n_outcomes
        b = np.concatenate((fixed_effects, b))
        x = sps.hstack((make_dummies(data[categorical_controls[0]],
                                     False), x)).tocsr()
        assert b.shape[0] == x.shape[1]
        if estimate_variance or get_residual:
            error -= fixed_effects[data[categorical_controls[0]].values]
    else:
        dummies = get_all_dummies(data[categorical_controls].values)
        x = sps.hstack((dummies, sps.csc_matrix(x)))
        assert sps.issparse(x)
        assert type(x) is sps.csc_matrix
        if check_rank:
            collinear, _ = find_collinear_cols(x.T.dot(x).A)
            x = remove_cols_from_csc(x, collinear)
        if y.ndim == 1 or y.shape[1] == 1:
            b = sps.linalg.lsqr(x, y)[0]
        else:
            # TODO: there's a function for doing this all at once
            b = np.zeros((x.shape[1], y.shape[1]), order="F")
            for i in range(y.shape[1]):
                b[:, i] = sps.linalg.lsqr(x, y[:, i], atol=1e-10)[0]

        if estimate_variance or get_residual:
            if b.ndim == 1:
                b = b[:, None]
            assert b.ndim == 2
            predicted = x.dot(b)
            assert y.shape == predicted.shape
            error = y - predicted
            assert error.shape == y.shape

    assert np.all(np.isfinite(b))
    if not estimate_variance and not get_residual:
        return b, x

    if get_residual:
        return b, x, error

    if estimate_variance:
        assert b.shape[0] == x.shape[1]
        _, r = np.linalg.qr(x if isinstance(x, np.ndarray) else x.A)

        inv_r = scipy.linalg.solve_triangular(r, np.eye(r.shape[0]))
        inv_x_prime_x = inv_r.dot(inv_r.T)
        if cluster is not None:
            grouped = Groupby(data[cluster])

            def f(mat):
                return mat[:, 1:].T.dot(mat[:, 0])

            V = []
            for i in range(y.shape[1]):
                u_ = grouped.apply(
                    f,
                    np.hstack((error[:, i, None], x.A)),
                    shape=(grouped.n_keys, x.shape[1]),
                    broadcast=False,
                )

                inner = u_.T.dot(u_)
                V.append(inv_x_prime_x.dot(inner).dot(inv_x_prime_x))
        else:
            error_sums = np.sum(error**2, 0)
            assert len(error_sums) == y.shape[1]
            V = [
                inv_x_prime_x * es / (len(y) - x.shape[1]) for es in error_sums
            ]

        return b, x, error, V
Example no. 27
 def f(self, x: np.ndarray, training: bool) -> np.ndarray:
     self.x = x
     return x.dot(self._w)
Example no. 28
 def forward(self, o: np.ndarray):
     self.belief_matrix = o.dot(self.t_T.dot(self.belief_matrix))
Example no. 29
def sres_to_schi2(res: np.ndarray, sres: np.ndarray):
    """Translate residual sensitivities to chi2 gradient."""
    return 2 * res.dot(sres)
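A toy check: with chi2 = sum(res_i**2), its gradient is 2 * res.dot(sres) when sres[i, j] = d res_i / d p_j. The numbers below are illustrative.

import numpy as np

res = np.array([1.0, -2.0])
sres = np.array([[1.0, 0.0],
                 [0.0, 3.0]])   # assumed residual sensitivities
# sres_to_schi2(res, sres) == array([ 2., -12.])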
Example no. 30
def _lanczos_m(A: np.ndarray, m: int, nv: int, rademacher: bool, starting_vectors: Optional[np.ndarray] = None) \
        -> Tuple[np.ndarray, np.ndarray]:
    r"""Lanczos algorithm computes symmetric m x m tridiagonal matrix T and matrix V with orthogonal rows
        constituting the basis of the Krylov subspace K_m(A, x),
        where x is an arbitrary starting unit vector.
        This implementation parallelizes `nv` starting vectors.

    Args:
        A: matrix based on which the Krylov subspace will be built.
        m: Number of Lanczos steps.
        nv: Number of random vectors.
        rademacher: True to draw random vectors from the Rademacher distribution,
            False to draw them from the standard normal distribution.
        starting_vectors: Specified starting vectors.

    Returns:
        T: Array with shape (nv, m, m), where T[i, :, :] is the i-th symmetric tridiagonal matrix.
        V: Array with shape (n, m, nv), where V[:, :, i] is the i-th matrix with orthonormal columns.
    """
    orthtol = 1e-5
    if starting_vectors is None:
        if rademacher:
            starting_vectors = np.sign(np.random.randn(A.shape[0], nv))
        else:
            starting_vectors = np.random.randn(A.shape[0], nv)  # init random vectors in columns: n x nv
    V = np.zeros((starting_vectors.shape[0], m, nv))
    T = np.zeros((nv, m, m))

    np.divide(starting_vectors, np.linalg.norm(starting_vectors, axis=0), out=starting_vectors)  # normalize each column
    V[:, 0, :] = starting_vectors

    w = A.dot(starting_vectors)
    alpha = np.einsum('ij,ij->j', w, starting_vectors)
    w -= alpha[None, :] * starting_vectors
    beta = np.einsum('ij,ij->j', w, w)
    np.sqrt(beta, out=beta)

    T[:, 0, 0] = alpha
    T[:, 0, 1] = beta
    T[:, 1, 0] = beta

    np.divide(w, beta[None, :], out=w)
    V[:, 1, :] = w
    t = np.zeros((m, nv))

    for i in range(1, m):
        old_starting_vectors = V[:, i - 1, :]
        starting_vectors = V[:, i, :]

        w = A.dot(starting_vectors)  # sparse @ dense
        w -= beta[None, :] * old_starting_vectors  # n x nv
        np.einsum('ij,ij->j', w, starting_vectors, out=alpha)

        T[:, i, i] = alpha

        if i < m - 1:
            w -= alpha[None, :] * starting_vectors  # n x nv

            # re-orthogonalize against all previous Lanczos vectors
            np.einsum('ijk,ik->jk', V, w, out=t)
            w -= np.einsum('ijk,jk->ik', V, t)
            np.einsum('ij,ij->j', w, w, out=beta)
            np.sqrt(beta, out=beta)
            np.divide(w, beta[None, :], out=w)

            T[:, i, i + 1] = beta
            T[:, i + 1, i] = beta

            # additional re-orthogonalization passes
            innerprod = np.einsum('ijk,ik->jk', V, w)
            reortho = False
            for _ in range(100):
                if not (innerprod > orthtol).sum():
                    reortho = True
                    break

                np.einsum('ijk,ik->jk', V, w, out=t)
                w -= np.einsum('ijk,jk->ik', V, t)
                np.divide(w, np.linalg.norm(w, axis=0)[None, :], out=w)
                innerprod = np.einsum('ijk,ik->jk', V, w)

            V[:, i + 1, :] = w

            if (np.abs(beta) > 1e-6).sum() == 0 or not reortho:
                break

    return T, V
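A hypothetical run on a small random symmetric matrix (Lanczos expects a symmetric operator); shapes follow the docstring above.

import numpy as np

A = np.random.randn(50, 50)
A = A + A.T                         # symmetrize
T, V = _lanczos_m(A, m=10, nv=4, rademacher=False)
# T.shape == (4, 10, 10); V.shape == (50, 10, 4)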
Example no. 31
 def beta(weights: np.ndarray) -> float:
     portfolio_equity = weights.dot(normalized)
     returns = np.diff(portfolio_equity) / portfolio_equity[:-1]
     return np.abs(np.corrcoef(returns, market_returns)[0, 1])
Example no. 32
def rotate_z(points: np.ndarray, theta: float) -> np.ndarray:
    return points.dot(rotation_matrix_z(theta).T)
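rotation_matrix_z is assumed to be the standard rotation about the z-axis; a sketch consistent with the points.dot(R.T) convention above:

import numpy as np

def rotation_matrix_z(theta: float) -> np.ndarray:
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s,  c, 0.0],
                     [0.0, 0.0, 1.0]])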
Example no. 33
def cos_v(v1: np.ndarray, v2: np.ndarray):
    return v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
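Quick checks of the cosine similarity; both values follow directly from the formula.

import numpy as np

# cos_v(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  -> 0.0 (orthogonal)
# cos_v(np.array([1.0, 1.0]), np.array([2.0, 2.0]))  -> 1.0 (parallel)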
Example no. 34
 def output(self, x_input: np.ndarray) -> np.ndarray:
     self.layer_output = self.output_func(x_input.dot(self.W) + self.b)
     return self.layer_output
Example no. 35
def randomized_range_finder(matrix: np.ndarray, size: int, n_iter: int, power_iteration_normalizer='auto',
                            random_state=None, return_all: bool = False) \
                            -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray]]:
    """Compute an orthonormal matrix :math:`Q`, whose range approximates the range of the input matrix.

    :math:`A \\approx QQ^*A`.

    Parameters
    ----------
    matrix :
        Input matrix
    size :
        Size of the return array
    n_iter :
        Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless ``size`` is small
        (< .1 * min(matrix.shape)) in which case ``n_iter`` is set to 7.
        This improves precision with few components.
    power_iteration_normalizer: ``'auto'`` (default), ``'QR'``, ``'LU'``, ``None``
            Whether the power iterations are normalized with step-by-step
            QR factorization (the slowest but most accurate), ``None``
            (the fastest but numerically unstable when ``n_iter`` is large, e.g.
            typically 5 or larger), or ``'LU'`` factorization (numerically stable
            but can lose slightly in accuracy). The ``'auto'`` mode applies no
            normalization if ``n_iter`` <= 2 and switches to ``'LU'`` otherwise.
    random_state: int, RandomState instance or ``None``, optional (default= ``None``)
        The seed of the pseudo random number generator to use when shuffling
        the data.  If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random number
        generator; If ``None``, the random number generator is the RandomState
        instance used by `np.random`.
    return_all : if True, returns (range_matrix, random_matrix, projected_matrix);
                else returns range_matrix only.

    Returns
    -------
    range_matrix : np.ndarray
        matrix (size x size) projection matrix, the range of which
        approximates well the range of the input matrix.
    random_matrix : np.ndarray, optional
        projection matrix
    projected_matrix : np.ndarray, optional
        product between the data and the projection matrix

    Notes
    -----
    Follows Algorithm 4.3 of
    `Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions
    <http://arxiv.org/pdf/0909.4061>`_
    Halko et al., 2009 (arXiv:0909.4061)
    """
    random_state = check_random_state(random_state)

    # Generating normal random vectors with shape: (A.shape[1], size)
    random_matrix = random_state.normal(size=(matrix.shape[1], size))
    if matrix.dtype.kind == 'f':
        # Ensure f32 is preserved as f32
        random_matrix = random_matrix.astype(matrix.dtype, copy=False)
    range_matrix = random_matrix.copy()

    # Deal with "auto" mode
    if power_iteration_normalizer == 'auto':
        if n_iter <= 2:
            power_iteration_normalizer = 'none'
        else:
            power_iteration_normalizer = 'LU'

    # Perform power iterations with 'range_matrix' to further 'imprint' the top
    # singular vectors of matrix in 'range_matrix'
    for i in range(n_iter):
        if power_iteration_normalizer == 'none':
            range_matrix = safe_sparse_dot(matrix, range_matrix)
            range_matrix = safe_sparse_dot(matrix.T, range_matrix)
        elif power_iteration_normalizer == 'LU':
            range_matrix, _ = linalg.lu(safe_sparse_dot(matrix, range_matrix),
                                        permute_l=True)
            range_matrix, _ = linalg.lu(safe_sparse_dot(
                matrix.T, range_matrix),
                                        permute_l=True)
        elif power_iteration_normalizer == 'QR':
            range_matrix, _ = linalg.qr(safe_sparse_dot(matrix, range_matrix),
                                        mode='economic')
            range_matrix, _ = linalg.qr(safe_sparse_dot(
                matrix.T, range_matrix),
                                        mode='economic')

    # Sample the range of 'matrix' by linear projection of 'range_matrix'
    # Extract an orthonormal basis
    range_matrix, _ = linalg.qr(safe_sparse_dot(matrix, range_matrix),
                                mode='economic')
    if return_all:
        return range_matrix, random_matrix, matrix.dot(random_matrix)
    else:
        return range_matrix
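A hypothetical use on a low-rank matrix; Q should then capture its range almost exactly. The helpers check_random_state, safe_sparse_dot, and scipy's linalg must be available in scope (e.g. via scikit-learn and scipy imports).

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(100, 10) @ rng.randn(10, 80)   # rank-10 matrix
Q = randomized_range_finder(A, size=12, n_iter=4, random_state=0)
# np.linalg.norm(A - Q @ (Q.T @ A)) is then close to zero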
Example no. 36
 def forward(self, o: np.ndarray):
     self.num_steps += 1
     if o is not None:
         self.belief_matrix = o.dot(self.t_T.dot(self.belief_matrix))
Example no. 37
def applyTransformation(transformation_matrix: np.ndarray,
                        vector: pygame.Vector3):
    return array_to_vector(transformation_matrix.dot(vector_to_array(vector)))