Example #1
def _svd_reduction_keeping_fixed_dims_using_V(input: Tensor,
                                              num: int) -> Tensor:
    """
    Outputs the SV part of SVCCA, reducing the input to a fixed number (num) of SVD dimensions.
    """
    left, diag, right = _svd(input)
    # svx = np.dot(sx[:dims_to_keep] * np.eye(dims_to_keep), Vx[:dims_to_keep])
    # - want [N, num]
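    # diag[:num] * torch.eye(num) is the diagonal matrix diag(S[:num])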
    sv_input = left[:, :num] @ (diag[:num] * torch.eye(num, dtype=input.dtype, device=input.device))
    return sv_input
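
All five examples come from the same module and assume torch is imported and a private
_svd helper is in scope. A minimal sketch of that shared context, assuming _svd wraps
torch.linalg.svd and returns V rather than V^T (so the left, diag, right unpacking
used above works):

from typing import Tuple

import torch
from torch import Tensor


def _svd(input: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    # reduced SVD in torch.svd style: input == u @ s.diag() @ v.t()
    u, s, vh = torch.linalg.svd(input, full_matrices=False)
    return u, s, vh.transpose(-2, -1)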
Example #2
def _cca_by_svd(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    """ CCA using only SVD.
    For more details, see Press 2011, "Canonical Correlation Clarified by Singular Value Decomposition".
    This function assumes the matrices x and y have already been preprocessed appropriately,
    e.g. by centering and dividing by the maximum value.

    Args:
        x: input tensor of shape NxD1
        y: input tensor of shape NxD2

    Returns: x-side coefficients, y-side coefficients, canonical correlations (the diagonal)

    """

    # note: torch.svd(x)[1] (the singular values) is a vector, not a diagonal matrix
    u_1, s_1, v_1 = _svd(x)
    u_2, s_2, v_2 = _svd(y)
    uu = u_1.t() @ u_2
    # - see page 4 for correctness of this step
    u, diag, v = _svd(uu)
    # v @ s.diag() == v * s.unsqueeze(0) (column scaling), but much faster
    a = (v_1 * s_1.reciprocal_().unsqueeze_(0)) @ u
    b = (v_2 * s_2.reciprocal_().unsqueeze_(0)) @ v
    return a, b, diag
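
A usage sketch for _cca_by_svd (the shapes and preprocessing here are illustrative, not
from the source):

torch.manual_seed(0)
x = torch.randn(100, 10)
y = torch.randn(100, 8)
x = x - x.mean(dim=0)  # center, as the docstring requires
y = y - y.mean(dim=0)
a, b, diag = _cca_by_svd(x, y)
print(diag)                          # canonical correlations, descending
print((x @ a).shape, (y @ b).shape)  # projected views, both [100, 8]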
Example #3
def _svd_reduction_keeping_fixed_dims(input: Tensor, num: int) -> Tensor:
    """
    Outputs the SV part of SVCCA, reducing the input to a fixed number (num) of SVD dimensions.

    input @ right[:, :num] == left[:, :num] @ (diag[:num] * torch.eye(num, dtype=input.dtype))
    because the SVD factors have orthonormal columns: right-multiplying input = U S V^T
    by right[:, :num] cancels the V^T part.
    """
    left, diag, right = _svd(input)
    # adaptive alternative (cf. _svd_reduction in Example #4): derive num from an
    # accept_rate threshold instead of fixing it:
    # full = diag.abs().sum()
    # ratio = diag.abs().cumsum(dim=0) / full
    # num = torch.where(ratio < accept_rate,
    #                   input.new_ones(1, dtype=torch.long),
    #                   input.new_zeros(1, dtype=torch.long)
    #                   ).sum()
    return input @ right[:, :num]
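
A quick numerical check of the identity stated in the docstring, on random data
(illustrative only):

torch.manual_seed(0)
x = torch.randn(50, 6)
left, diag, right = _svd(x)
num = 3
lhs = x @ right[:, :num]
rhs = left[:, :num] @ (diag[:num] * torch.eye(num, dtype=x.dtype))
assert torch.allclose(lhs, rhs, atol=1e-5)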
Example #4
def _svd_reduction(input: Tensor, accept_rate: float) -> Tensor:
    """
    Outputs the SV part of SVCCA, i.e. it performs the dimensionality reduction of SV by
    dropping directions (neurons) such that accept_rate (e.g. 0.99) of the total singular
    value mass is kept.

    Note:
        - this might make a sanity check for SVCCA that expects N < D fail, since the
          reduction can automatically shrink the number of dimensions/neurons to D' < D
          in [N, D'].

    :param input: tensor of shape [N, D], e.g. a matrix of activations.
    :param accept_rate: fraction of the total (absolute) singular value mass to keep, e.g. 0.99.
    :return: reduced tensor of shape [N, D'] with D' <= D.
    """
    left, diag, right = _svd(input)
    full = diag.abs().sum()
    ratio = diag.abs().cumsum(dim=0) / full
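    # count how many leading singular values are needed before the cumulative
    # ratio crosses accept_rate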
    num = torch.where(ratio < accept_rate, input.new_ones(1, dtype=torch.long),
                      input.new_zeros(1, dtype=torch.long)).sum()
    return input @ right[:, :num]
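
An illustrative call, assuming the input is a matrix of activations with one row per
sample (the variable names are made up for the example):

torch.manual_seed(0)
acts = torch.randn(200, 32)
reduced = _svd_reduction(acts, accept_rate=0.99)
print(reduced.shape)  # [200, D'] with D' <= 32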
Example #5
def _cca_by_qr(x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    """ CCA using QR and SVD.
    For more details, see Press 2011, "Canonical Correlation Clarified by Singular Value Decomposition".
    This function assumes the matrices x and y have already been preprocessed appropriately,
    e.g. by centering and dividing by the maximum value.

    Args:
        x: input tensor of shape NxD1
        y: input tensor of shape NxD2

    Returns: x-side coefficients, y-side coefficients, canonical correlations (the diagonal)

    """
    q_1, r_1 = torch.linalg.qr(x)
    q_2, r_2 = torch.linalg.qr(y)
    qq = q_1.t() @ q_2
    u, diag, v = _svd(qq)
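    # undo the QR change of basis: x @ a == q_1 @ u and y @ b == q_2 @ v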
    a = r_1.inverse() @ u
    b = r_2.inverse() @ v
    return a, b, diag
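
Both routines compute the same canonical correlations (the singular values of the product
of orthonormal bases for the two column spaces), so on the same preprocessed inputs their
diag outputs should agree. A sketch of that check on random data:

torch.manual_seed(0)
x = torch.randn(100, 10)
y = torch.randn(100, 8)
x = x - x.mean(dim=0)
y = y - y.mean(dim=0)
_, _, diag_svd = _cca_by_svd(x, y)
_, _, diag_qr = _cca_by_qr(x, y)
assert torch.allclose(diag_svd, diag_qr, atol=1e-4)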