Example #1
def test_zero_padding_rows_columns():
    r"""Test _zero_padding with random array padding rows and columns."""
    array1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
    array2 = np.array([[1, 2.5], [9, 5], [4, 8.5]])

    padded2, padded1 = _zero_padding(array2, array1, pad_mode='row-col')
    array2_test = np.array([[1, 2.5, 0], [9, 5, 0], [4, 8.5, 0], [0, 0, 0]])
    assert padded1.shape == (4, 3)
    assert padded2.shape == (4, 3)
    assert (abs(padded1 - array1) < 1.e-10).all()
    assert (abs(padded2 - array2_test) < 1.e-10).all()

    # Test the scenario where both arrays have the same wide (fat) rectangular shape.
    array1 = np.array([[60, 85, 86, 1.], [85, 151, 153, 2.],
                       [86, 153, 158, 10.]])
    padded1, padded2 = _zero_padding(array1, array1, pad_mode='row-col')
    assert np.all(np.abs(array1 - padded1) < 1e-5)
    assert np.all(np.abs(array1 - padded2) < 1e-5)
    assert padded1.shape == padded2.shape
    assert padded1.shape == (3, 4)

    # Test the scenario where both arrays have the same wide rectangular shape.
    array1 = np.random.random((2, 10))
    array2 = np.random.random((2, 10))
    padded1, padded2 = _zero_padding(array1, array2, pad_mode='row-col')
    assert np.all(np.abs(array1 - padded1) < 1e-5)
    assert np.all(np.abs(array2 - padded2) < 1e-5)
    assert padded1.shape == padded2.shape
    assert padded1.shape == (2, 10)
Example #2
def test_zero_padding_columns():
    r"""Test _zero_padding with random array padding columns."""
    array1 = np.array([[4, 7, 2], [1, 3, 5]])
    array2 = np.array([[5], [2]])

    # match the number of columns of the 1st array
    padded2, padded1 = _zero_padding(array2, array1, pad_mode='col')
    assert padded1.shape == (2, 3)
    assert padded2.shape == (2, 3)
    assert (abs(padded1 - array1) < 1.e-10).all()
    assert (abs(padded2 - np.array([[5, 0, 0], [2, 0, 0]])) < 1.e-10).all()

    # match the number of columns of the 2nd array
    array3 = np.arange(8).reshape(8, 1)
    array4 = np.arange(8).reshape(2, 4)
    padded3, padded4 = _zero_padding(array3, array4, pad_mode='col')
    assert padded3.shape == (8, 4)
    assert padded4.shape == (2, 4)
    assert (abs(array4 - padded4) < 1.e-10).all()
    expected = list(range(8))
    expected.extend([0] * 24)
    expected = np.array(expected).reshape(4, 8).T
    assert (abs(expected - padded3) < 1.e-10).all()

    # padding the already padded arrays should not change anything
    padded5, padded6 = _zero_padding(padded3, padded4, pad_mode='col')
    assert padded3.shape == (8, 4)
    assert padded4.shape == (2, 4)
    assert padded5.shape == (8, 4)
    assert padded6.shape == (2, 4)
    assert (abs(padded5 - padded3) < 1.e-10).all()
    assert (abs(padded6 - padded4) < 1.e-10).all()
Example #3
def test_zero_padding_rows():
    r"""Test _zero_padding with random array padding rows."""
    array1 = np.array([[1, 2], [3, 4]])
    array2 = np.array([[5, 6]])

    # match the number of rows of the 1st array
    padded2, padded1 = _zero_padding(array2, array1, pad_mode='row')
    assert padded1.shape == (2, 2)
    assert padded2.shape == (2, 2)
    assert (abs(padded1 - array1) < 1.e-10).all()
    assert (abs(padded2 - np.array([[5, 6], [0, 0]])) < 1.e-10).all()

    # match the number of rows of the 2nd array
    array3 = np.arange(8).reshape(2, 4)
    array4 = np.arange(8).reshape(4, 2)
    padded3, padded4 = _zero_padding(array3, array4, pad_mode='row')
    assert padded3.shape == (4, 4)
    assert padded4.shape == (4, 2)
    assert (abs(array4 - padded4) < 1.e-10).all()
    expected = list(range(8))
    expected.extend([0] * 8)
    expected = np.array(expected).reshape(4, 4)
    assert (abs(expected - padded3) < 1.e-10).all()

    # padding the already padded arrays should not change anything
    padded5, padded6 = _zero_padding(padded3, padded4, pad_mode='row')
    assert padded3.shape == (4, 4)
    assert padded4.shape == (4, 2)
    assert padded5.shape == (4, 4)
    assert padded6.shape == (4, 2)
    assert (abs(padded5 - padded3) < 1.e-10).all()
    assert (abs(padded6 - padded4) < 1.e-10).all()
Example #4
def test_zero_padding_square():
    r"""Test _zero_padding with squared array."""
    # Try two equivalent (but different sized) symmetric arrays
    array1 = np.array([[60, 85, 86], [85, 151, 153], [86, 153, 158]])
    array2 = np.array([[60, 85, 86, 0, 0], [85, 151, 153, 0, 0],
                       [86, 153, 158, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])
    square1, square2 = _zero_padding(array1, array2, pad_mode='square')
    assert square1.shape == square2.shape
    assert square1.shape[0] == square1.shape[1]

    # Test the scenario where both arrays have the same rectangular shape.
    array1 = np.array([[60, 85, 86, 1.], [85, 151, 153, 2.],
                       [86, 153, 158, 10.]])
    array2 = np.array([[60, 85, 86, 1.], [85, 151, 153, 2.],
                       [86, 153, 158, 10.]])
    square1, square2 = _zero_padding(array1, array2, pad_mode='square')
    assert square1.shape == square2.shape
    assert square1.shape[0] == square1.shape[1]
    assert square1.shape[0] == 4

    # Equally sized square arrays should be returned unchanged.
    sym_part = np.array([[1, 7, 8, 4], [6, 4, 8, 1]])
    array1 = np.dot(sym_part, sym_part.T)
    array2 = array1
    assert array1.shape == array2.shape
    square1, square2 = _zero_padding(array1, array2, pad_mode='square')
    assert square1.shape == square2.shape
    assert square1.shape[0] == square1.shape[1]
    assert (abs(square1 - array1) < 1.e-10).all()
    assert (abs(square2 - array2) < 1.e-10).all()
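
The four pad modes exercised by Examples #1-#4 ('row', 'col', 'row-col', and 'square') all amount to embedding each array in the top-left corner of a larger zero matrix. The following is a minimal sketch of that behaviour, assuming plain 2D NumPy arrays; it is illustrative only and is not the library's actual _zero_padding implementation.

import numpy as np

def zero_padding_sketch(array_a, array_b, pad_mode="row-col"):
    """Sketch: zero-pad two 2D arrays so their shapes agree (illustrative only)."""
    def embed(arr, n_rows, n_cols):
        # place arr in the top-left corner of an (n_rows, n_cols) zero matrix
        out = np.zeros((n_rows, n_cols))
        out[:arr.shape[0], :arr.shape[1]] = arr
        return out

    rows = max(array_a.shape[0], array_b.shape[0])
    cols = max(array_a.shape[1], array_b.shape[1])
    if pad_mode == "row":       # pad rows only; column counts stay as they are
        return embed(array_a, rows, array_a.shape[1]), embed(array_b, rows, array_b.shape[1])
    if pad_mode == "col":       # pad columns only; row counts stay as they are
        return embed(array_a, array_a.shape[0], cols), embed(array_b, array_b.shape[0], cols)
    if pad_mode == "row-col":   # pad both dimensions so the shapes match exactly
        return embed(array_a, rows, cols), embed(array_b, rows, cols)
    if pad_mode == "square":    # pad both arrays to the same square size
        dim = max(rows, cols)
        return embed(array_a, dim, dim), embed(array_b, dim, dim)
    raise ValueError(f"unknown pad_mode: {pad_mode}")

Under this sketch, padding an already padded pair a second time changes nothing, which is exactly what the idempotence checks in Examples #2 and #3 assert.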
Example #5
def permutation(
    a,
    b,
    pad=True,
    translate=False,
    scale=False,
    unpad_col=False,
    unpad_row=False,
    check_finite=True,
    weight=None,
):
    r"""Perform one-sided permutation Procrustes.

    Given a matrix :math:`\mathbf{A}_{m \times n}` and a reference matrix :math:`\mathbf{B}_{m \times
    n}`, find the permutation transformation matrix :math:`\mathbf{P}_{n \times n}`
    that makes :math:`\mathbf{AP}` as close as possible to :math:`\mathbf{B}`. In other words,

    .. math::
       \underbrace{\text{min}}_{\left\{\mathbf{P} \left| {[\mathbf{P}]_{ij} \in \{0, 1\} \atop
       \sum_{i=1}^n [\mathbf{P}]_{ij} = \sum_{j=1}^n [\mathbf{P}]_{ij} = 1} \right. \right\}}
       \|\mathbf{A} \mathbf{P} - \mathbf{B}\|_{F}^2

    This Procrustes method requires the :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices to
    have the same shape, which is guaranteed with the default ``pad=True`` argument for any given
    :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices. In preparing the :math:`\mathbf{A}` and
    :math:`\mathbf{B}` matrices, the (optional) order of operations is: **1)** unpad zero
    rows/columns, **2)** translate the matrices to the origin, **3)** weight entries of
    :math:`\mathbf{A}`, **4)** scale the matrices to have unit norm, **5)** pad matrices with zero
    rows/columns so they have the same shape.

    Parameters
    ----------
    a : ndarray
        The 2D-array :math:`\mathbf{A}` which is going to be transformed.
    b : ndarray
        The 2D-array :math:`\mathbf{B}` representing the reference matrix.
    pad : bool, optional
        Add zero rows (at the bottom) and/or columns (to the right-hand side) of matrices
        :math:`\mathbf{A}` and :math:`\mathbf{B}` so that they have the same shape.
    translate : bool, optional
        If True, both arrays are centered at the origin (columns of the arrays will have mean zero).
    scale : bool, optional
        If True, both arrays are normalized with respect to the Frobenius norm, i.e.,
        :math:`\text{Tr}\left[\mathbf{A}^\dagger\mathbf{A}\right] = 1` and
        :math:`\text{Tr}\left[\mathbf{B}^\dagger\mathbf{B}\right] = 1`.
    unpad_col : bool, optional
        If True, zero columns (with values less than 1.0e-8) on the right-hand side are removed.
    unpad_row : bool, optional
        If True, zero rows (with values less than 1.0e-8) at the bottom are removed.
    check_finite : bool, optional
        If True, convert the input to an array, checking for NaNs or Infs.
    weight : ndarray, optional
        The 1D-array representing the weights of each row of :math:`\mathbf{A}`. This defines the
        elements of the diagonal matrix :math:`\mathbf{W}` that is multiplied by the matrix
        :math:`\mathbf{A}`, i.e., :math:`\mathbf{A} \rightarrow \mathbf{WA}`.

    Returns
    -------
    res : ProcrustesResult
        The Procrustes result represented as a :class:`utils.ProcrustesResult` object.

    Notes
    -----
    The optimal :math:`n \times n` permutation matrix is obtained by,

    .. math::
        \mathbf{P}^{\text{opt}} =
        \arg \underbrace{\text{min}}_{\left\{\mathbf{P} \left| {[\mathbf{P}]_{ij} \in \{0, 1\}
        \atop \sum_{i=1}^n [\mathbf{P}]_{ij} = \sum_{j=1}^n [\mathbf{P}]_{ij} = 1} \right. \right\}}
            \|\mathbf{A} \mathbf{P} - \mathbf{B}\|_{F}^2
      = \underbrace{\text{max}}_{\left\{\mathbf{P} \left| {[\mathbf{P}]_{ij} \in \{0, 1\}
        \atop \sum_{i=1}^n [\mathbf{P}]_{ij} = \sum_{j=1}^n [\mathbf{P}]_{ij} = 1} \right. \right\}}
            \text{Tr}\left[\mathbf{P}^\dagger\mathbf{A}^\dagger\mathbf{B} \right]

    The solution is found by relaxing the problem into a linear programming problem over the set
    of doubly stochastic matrices. A linear program attains its optimum at a vertex of the
    feasible region, and by Birkhoff's theorem the vertices of the set of doubly stochastic
    matrices are exactly the permutation matrices. So,

    .. math::
       \underbrace{\text{max}}_{\left\{\mathbf{P} \left| {[\mathbf{P}]_{ij} \in \{0, 1\}
       \atop \sum_{i=1}^n [\mathbf{P}]_{ij} = \sum_{j=1}^n [\mathbf{P}]_{ij} = 1} \right. \right\}}
          \text{Tr}\left[\mathbf{P}^\dagger\mathbf{A}^\dagger\mathbf{B} \right] =
       \underbrace{\text{max}}_{\left\{\mathbf{P} \left| {[\mathbf{P}]_{ij} \geq 0
       \atop \sum_{i=1}^n [\mathbf{P}]_{ij} = \sum_{j=1}^n [\mathbf{P}]_{ij} = 1} \right. \right\}}
          \text{Tr}\left[\mathbf{P}^\dagger\left(\mathbf{A}^\dagger\mathbf{B}\right) \right]

    This is a matching problem and can be solved by the Hungarian algorithm. The cost matrix is
    defined as :math:`\mathbf{A}^\dagger\mathbf{B}`, and `scipy.optimize.linear_sum_assignment`
    is used to find the permutation that maximizes the linear sum assignment objective.

    """
    # check inputs
    new_a, new_b = setup_input_arrays(
        a,
        b,
        unpad_col,
        unpad_row,
        pad,
        translate,
        scale,
        check_finite,
        weight,
    )
    # if either array has fewer rows than columns, both arrays are padded to be square
    if (new_a.shape[0] < new_a.shape[1]) or (new_b.shape[0] < new_b.shape[1]):
        new_a, new_b = _zero_padding(new_a, new_b, "square")

    # compute cost matrix C = A.T B
    c = np.dot(new_a.T, new_b)
    # compute permutation matrix using Hungarian algorithm
    p = _compute_permutation_hungarian(c)
    # compute one-sided permutation error
    error = compute_error(new_a, new_b, p)

    return ProcrustesResult(new_a=new_a, new_b=new_b, t=p, error=error)
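
The helper _compute_permutation_hungarian called above is not shown on this page. A plausible sketch of that step, built directly on scipy.optimize.linear_sum_assignment as the docstring describes, could look like the following (the function name and exact structure are illustrative assumptions, not the library's code):

import numpy as np
from scipy.optimize import linear_sum_assignment

def compute_permutation_hungarian_sketch(profit_matrix):
    """Sketch: permutation matrix P maximizing Tr(P^T C) for C = profit_matrix."""
    # linear_sum_assignment solves the assignment problem; maximize=True selects
    # the matching with the largest total profit instead of the smallest cost
    row_ind, col_ind = linear_sum_assignment(profit_matrix, maximize=True)
    # convert the matching into a 0/1 permutation matrix
    p = np.zeros(profit_matrix.shape)
    p[row_ind, col_ind] = 1.0
    return p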
Example #6
def symmetric(a,
              b,
              pad=True,
              translate=False,
              scale=False,
              unpad_col=False,
              unpad_row=False,
              check_finite=True,
              weight=None,
              lapack_driver="gesvd"):
    r"""Perform symmetric Procrustes.

    Given a matrix :math:`\mathbf{A}_{m \times n}` and a reference matrix :math:`\mathbf{B}_{m
    \times n}` with :math:`m \geqslant n`, find the symmetric transformation matrix
    :math:`\mathbf{X}_{n \times n}` that makes :math:`\mathbf{AX}` as close as possible to
    :math:`\mathbf{B}`. In other words,

    .. math::
       \underbrace{\text{min}}_{\left\{\mathbf{X} \left| \mathbf{X} = \mathbf{X}^\dagger
                        \right. \right\}} \|\mathbf{A} \mathbf{X} - \mathbf{B}\|_{F}^2

    This Procrustes method requires the :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices to
    have the same shape with :math:`m \geqslant n`, which is guaranteed with the default ``pad``
    argument for any given :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices.
    In preparing the :math:`\mathbf{A}` and
    :math:`\mathbf{B}` matrices, the (optional) order of operations is: **1)** unpad zero
    rows/columns, **2)** translate the matrices to the origin, **3)** weight entries of
    :math:`\mathbf{A}`, **4)** scale the matrices to have unit norm, **5)** pad matrices with zero
    rows/columns so they have the same shape.

    Parameters
    ----------
    a : ndarray
        The 2D-array :math:`\mathbf{A}` which is going to be transformed.
    b : ndarray
        The 2D-array :math:`\mathbf{B}` representing the reference matrix.
    pad : bool, optional
        Add zero rows (at the bottom) and/or columns (to the right-hand side) of matrices
        :math:`\mathbf{A}` and :math:`\mathbf{B}` so that they have the same shape.
    translate : bool, optional
        If True, both arrays are centered at the origin (columns of the arrays will have mean zero).
    scale : bool, optional
        If True, both arrays are normalized with respect to the Frobenius norm, i.e.,
        :math:`\text{Tr}\left[\mathbf{A}^\dagger\mathbf{A}\right] = 1` and
        :math:`\text{Tr}\left[\mathbf{B}^\dagger\mathbf{B}\right] = 1`.
    unpad_col : bool, optional
        If True, zero columns (with values less than 1.0e-8) on the right-hand side of the initial
        :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices are removed.
    unpad_row : bool, optional
        If True, zero rows (with values less than 1.0e-8) at the bottom of the initial
        :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices are removed.
    check_finite : bool, optional
        If True, convert the input to an array, checking for NaNs or Infs.
    weight : ndarray, optional
        The 1D-array representing the weights of each row of :math:`\mathbf{A}`. This defines the
        elements of the diagonal matrix :math:`\mathbf{W}` that is multiplied by the matrix
        :math:`\mathbf{A}`, i.e., :math:`\mathbf{A} \rightarrow \mathbf{WA}`.
    lapack_driver : {'gesvd', 'gesdd'}, optional
        Whether to use the more efficient divide-and-conquer approach ('gesdd') or the more robust
        general rectangular approach ('gesvd') to compute the singular-value decomposition with
        `scipy.linalg.svd`.

    Returns
    -------
    res : ProcrustesResult
        The Procrustes result represented as a :class:`utils.ProcrustesResult` object.

    Notes
    -----
    The optimal symmetric matrix is obtained by,

    .. math::
       \mathbf{X}_{\text{opt}} = \arg
       \underbrace{\text{min}}_{\left\{\mathbf{X} \left| \mathbf{X} = \mathbf{X}^\dagger
                        \right. \right\}} \|\mathbf{A} \mathbf{X} - \mathbf{B}\|_{F}^2 =
       \underbrace{\text{min}}_{\left\{\mathbf{X} \left| \mathbf{X} = \mathbf{X}^\dagger
                        \right. \right\}}
                \text{Tr}\left[\left(\mathbf{A}\mathbf{X} - \mathbf{B} \right)^\dagger
                         \left(\mathbf{A}\mathbf{X} - \mathbf{B} \right)\right]

    Considering the singular value decomposition of :math:`\mathbf{A}`,

    .. math::
       \mathbf{A}_{m \times n} = \mathbf{U}_{m \times m}
                                 \mathbf{\Sigma}_{m \times n}
                                 \mathbf{V}_{n \times n}^\dagger

    where :math:`\mathbf{\Sigma}_{m \times n}` is a rectangular diagonal matrix with non-negative
    singular values :math:`\sigma_i = [\mathbf{\Sigma}]_{ii}` listed in descending order, define

    .. math::
       \mathbf{C}_{m \times n} = \mathbf{U}_{m \times m}^\dagger
                                 \mathbf{B}_{m \times n} \mathbf{V}_{n \times n}

    with elements denoted by :math:`c_{ij}`.
    Then we compute the symmetric matrix :math:`\mathbf{Y}_{n \times n}` with

    .. math::
       [\mathbf{Y}]_{ij} = \begin{cases}
              0 & i \text{ and } j > \text{rank} \left(\mathbf{A}\right) \\
              \frac{\sigma_i c_{ij} + \sigma_j c_{ji}}{\sigma_i^2 +
              \sigma_j^2} & \text{otherwise} \end{cases}

    It is worth noting that the first part of this definition only applies in the unusual case where
    :math:`\mathbf{A}` has rank less than :math:`n`. The :math:`\mathbf{X}_\text{opt}` is given by

    .. math::
       \mathbf{X}_\text{opt} = \mathbf{V Y V}^{\dagger}

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[5., 2., 8.],
    ...               [2., 2., 3.],
    ...               [1., 5., 6.],
    ...               [7., 3., 2.]])
    >>> b = np.array([[ 52284.5, 209138. , 470560.5],
    ...               [ 22788.5,  91154. , 205096.5],
    ...               [ 46139.5, 184558. , 415255.5],
    ...               [ 22788.5,  91154. , 205096.5]])
    >>> res = symmetric(a, b, pad=True, translate=True, scale=True)
    >>> res.t   # symmetric transformation array
    array([[0.0166352 , 0.06654081, 0.14971682],
           [0.06654081, 0.26616324, 0.59886729],
           [0.14971682, 0.59886729, 1.34745141]])
    >>> res.error   # error
    4.483083428047388e-31

    """
    # check inputs
    new_a, new_b = setup_input_arrays(
        a,
        b,
        unpad_col,
        unpad_row,
        pad,
        translate,
        scale,
        check_finite,
        weight,
    )

    # if either array has fewer rows than columns, both arrays are padded to be square
    if (new_a.shape[0] < new_a.shape[1]) or (new_b.shape[0] < new_b.shape[1]):
        new_a, new_b = _zero_padding(new_a, new_b, "square")

    # compute SVD of A & matrix C
    u, s, vt = scipy.linalg.svd(new_a, lapack_driver=lapack_driver)
    c = np.dot(np.dot(u.T, new_b), vt.T)

    # compute intermediate matrix Y
    n = new_a.shape[1]
    y = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            if s[i]**2 + s[j]**2 != 0:
                y[i, j] = (s[i] * c[i, j] + s[j] * c[j, i]) / (s[i]**2 + s[j]**2)

    # compute optimum symmetric transformation matrix X
    x = np.dot(np.dot(vt.T, y), vt)
    error = compute_error(new_a, new_b, x)

    return ProcrustesResult(error=error, new_a=new_a, new_b=new_b, t=x, s=None)
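
As a self-contained sanity check of the SVD-based construction implemented above (assuming only numpy and scipy, and using a synthetic, exactly reachable target), one can build B = AX for a symmetric X and verify that the formula recovers it:

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
a = rng.random((5, 3))
x_true = rng.random((3, 3))
x_true = (x_true + x_true.T) / 2        # symmetrize the target transformation
b = np.dot(a, x_true)                   # B is exactly reachable, so the minimum error is 0

u, s, vt = scipy.linalg.svd(a)
c = np.dot(np.dot(u.T, b), vt.T)
n = a.shape[1]
y = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        if s[i]**2 + s[j]**2 != 0:
            y[i, j] = (s[i] * c[i, j] + s[j] * c[j, i]) / (s[i]**2 + s[j]**2)
x = np.dot(np.dot(vt.T, y), vt)

assert np.allclose(x, x.T)              # X is symmetric by construction
assert np.allclose(np.dot(a, x), b)     # AX reproduces B up to round-off
assert np.allclose(x, x_true)           # and recovers the symmetric target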