Code example #1
    def _matmul(self, x, adjoint=False, adjoint_arg=False):
        arg_dim = -1 if adjoint_arg else -2
        block_dimensions = (self._block_range_dimensions()
                            if adjoint else self._block_domain_dimensions())
        blockwise_arg = linear_operator_util.arg_is_blockwise(
            block_dimensions, x, arg_dim)
        if blockwise_arg:
            split_x = x
        else:
            split_dim = -1 if adjoint_arg else -2
            # Split input by columns if adjoint_arg is True, else by rows.
            split_x = linear_operator_util.split_arg_into_blocks(
                self._block_domain_dimensions(),
                self._block_domain_dimension_tensors,
                x,
                axis=split_dim)

        result_list = []
        for index, operator in enumerate(self.operators):
            result_list += [
                operator.matmul(split_x[index],
                                adjoint=adjoint,
                                adjoint_arg=adjoint_arg)
            ]

        if blockwise_arg:
            return result_list

        result_list = linear_operator_util.broadcast_matrix_batch_dims(
            result_list)
        return array_ops.concat(result_list, axis=-2)
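
A quick usage sketch of the blockwise branch above (not part of the source): it assumes a TensorFlow version in which `tf.linalg.LinearOperatorBlockDiag.matmul` accepts a list of per-block `Tensor`s, and all operator and input values are illustrative.

import tensorflow as tf

# Two square diagonal blocks with shapes [2, 2] and [3, 3].
op1 = tf.linalg.LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
op2 = tf.linalg.LinearOperatorFullMatrix(
    [[5., 6., 7.], [8., 9., 10.], [11., 12., 13.]])
block_diag = tf.linalg.LinearOperatorBlockDiag([op1, op2])

# Dense input: a single Tensor spanning all blocks; the result is a Tensor.
x = tf.ones([5, 1])
y = block_diag.matmul(x)  # shape [5, 1]

# Blockwise input: one Tensor per block; the result is a list of Tensors,
# mirroring the early `return result_list` above.
x_blocks = [tf.ones([2, 1]), tf.ones([3, 1])]
y_blocks = block_diag.matmul(x_blocks)  # shapes [2, 1] and [3, 1]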
Code example #2
def _test_matmul_base(self, use_placeholder, shapes_info, dtype, adjoint,
                      adjoint_arg, blockwise_arg, with_batch):
    # If batch dimensions are omitted and the linear operator has no batch
    # dimensions, skip the test case: it is already covered by
    # `with_batch=True`.
    if not with_batch and len(shapes_info.shape) <= 2:
        return
    with self.session(graph=ops.Graph()) as sess:
        sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
        operator, mat = self.operator_and_matrix(
            shapes_info, dtype, use_placeholder=use_placeholder)
        x = self.make_x(operator, adjoint=adjoint, with_batch=with_batch)
        # If adjoint_arg, compute A X^H^H = A X.
        if adjoint_arg:
            op_matmul = operator.matmul(linalg.adjoint(x),
                                        adjoint=adjoint,
                                        adjoint_arg=adjoint_arg)
        else:
            op_matmul = operator.matmul(x, adjoint=adjoint)
        mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
        if not use_placeholder:
            self.assertAllEqual(op_matmul.shape, mat_matmul.shape)

        # If the operator is blockwise, test both blockwise `x` and `Tensor` `x`;
        # else test only `Tensor` `x`. In both cases, evaluate all results in a
        # single `sess.run` call to avoid re-sampling the random `x` in graph mode.
        if blockwise_arg and len(operator.operators) > 1:
            # pylint: disable=protected-access
            block_dimensions = (operator._block_range_dimensions() if adjoint
                                else operator._block_domain_dimensions())
            block_dimensions_fn = (operator._block_range_dimension_tensors
                                   if adjoint else
                                   operator._block_domain_dimension_tensors)
            # pylint: enable=protected-access
            split_x = linear_operator_util.split_arg_into_blocks(
                block_dimensions, block_dimensions_fn, x, axis=-2)
            if adjoint_arg:
                split_x = [linalg.adjoint(y) for y in split_x]
            split_matmul = operator.matmul(split_x,
                                           adjoint=adjoint,
                                           adjoint_arg=adjoint_arg)

            self.assertEqual(len(split_matmul), len(operator.operators))
            split_matmul = linear_operator_util.broadcast_matrix_batch_dims(
                split_matmul)
            fused_block_matmul = array_ops.concat(split_matmul, axis=-2)
            op_matmul_v, mat_matmul_v, fused_block_matmul_v = sess.run(
                [op_matmul, mat_matmul, fused_block_matmul])

            # Check that the operator applied to blockwise input gives the same result
            # as matrix multiplication.
            self.assertAC(fused_block_matmul_v, mat_matmul_v)
        else:
            op_matmul_v, mat_matmul_v = sess.run([op_matmul, mat_matmul])

        # Check that the operator applied to a `Tensor` gives the same result as
        # matrix multiplication.
        self.assertAC(op_matmul_v, mat_matmul_v)
Code example #3
def _test_solve_base(self, use_placeholder, shapes_info, dtype, adjoint,
                     adjoint_arg, blockwise_arg, with_batch):
    # If batch dimensions are omitted and the linear operator has no batch
    # dimensions, skip the test case: it is already covered by
    # `with_batch=True`.
    if not with_batch and len(shapes_info.shape) <= 2:
        return
    with self.session(graph=ops.Graph()) as sess:
        sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
        operator, mat = self.operator_and_matrix(
            shapes_info, dtype, use_placeholder=use_placeholder)
        rhs = self.make_rhs(operator, adjoint=adjoint, with_batch=with_batch)
        # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
        if adjoint_arg:
            op_solve = operator.solve(linalg.adjoint(rhs),
                                      adjoint=adjoint,
                                      adjoint_arg=adjoint_arg)
        else:
            op_solve = operator.solve(rhs,
                                      adjoint=adjoint,
                                      adjoint_arg=adjoint_arg)
        mat_solve = linear_operator_util.matrix_solve_with_broadcast(
            mat, rhs, adjoint=adjoint)
        if not use_placeholder:
            self.assertAllEqual(op_solve.shape, mat_solve.shape)

        # If the operator is blockwise, test both blockwise rhs and `Tensor` rhs;
        # else test only `Tensor` rhs. In both cases, evaluate all results in a
        # single `sess.run` call to avoid re-sampling the random rhs in graph mode.
        if blockwise_arg and len(operator.operators) > 1:
            split_rhs = linear_operator_util.split_arg_into_blocks(
                operator._block_domain_dimensions(),  # pylint: disable=protected-access
                operator._block_domain_dimension_tensors,  # pylint: disable=protected-access
                rhs,
                axis=-2)
            if adjoint_arg:
                split_rhs = [linalg.adjoint(y) for y in split_rhs]
            split_solve = operator.solve(split_rhs,
                                         adjoint=adjoint,
                                         adjoint_arg=adjoint_arg)
            self.assertEqual(len(split_solve), len(operator.operators))
            split_solve = linear_operator_util.broadcast_matrix_batch_dims(
                split_solve)
            fused_block_solve = array_ops.concat(split_solve, axis=-2)
            op_solve_v, mat_solve_v, fused_block_solve_v = sess.run(
                [op_solve, mat_solve, fused_block_solve])

            # Check that the operator and matrix give the same solution when the rhs
            # is blockwise.
            self.assertAC(mat_solve_v, fused_block_solve_v)
        else:
            op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])

        # Check that the operator and matrix give the same solution when the rhs is
        # a `Tensor`.
        self.assertAC(op_solve_v, mat_solve_v)
Code example #4
    def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
        """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Given the blockwise `n + 1`-by-`n + 1` linear operator:

    op = [[A_00     0  ...     0  ...    0],
          [A_10  A_11  ...     0  ...    0],
          ...
          [A_k0  A_k1  ...  A_kk  ...    0],
          ...
          [A_n0  A_n1  ...  A_nk  ... A_nn]]

    we find `x = op.solve(y)` by observing that

    `y_k = A_k0.matmul(x_0) + A_k1.matmul(x_1) + ... + A_kk.matmul(x_k)`

    and therefore

    `x_k = A_kk.solve(y_k -
                      A_k0.matmul(x_0) - ... - A_k(k-1).matmul(x_(k-1)))`

    where `x_k` and `y_k` are the `k`th blocks obtained by decomposing `x`
    and `y` along their appropriate axes.

    We first solve `x_0 = A_00.solve(y_0)`. Proceeding inductively, we solve
    for `x_k`, `k = 1..n`, given `x_0..x_(k-1)`.

    The adjoint case is solved similarly, beginning with
    `x_n = A_nn.solve(y_n, adjoint=True)` and proceeding backwards.

    Examples:

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve R > 0 linear systems for every member of the batch.
    RHS = ... # shape [..., M, R]

    X = operator.solve(RHS)
    # X[..., :, r] is the solution to the r'th linear system
    # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

    operator.matmul(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape,
        or a list of `Tensor`s. `Tensor`s are treated like [batch] matrices,
        meaning that for every set of leading dimensions, the last two
        dimensions define a matrix.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, solve the system involving the adjoint
        of this `LinearOperator`:  `A^H X = rhs`.
      adjoint_arg:  Python `bool`.  If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[..., N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError:  If `self.is_non_singular` or `is_square` is False.
    """
        if self.is_non_singular is False:
            raise NotImplementedError(
                "Exact solve not implemented for an operator that is expected to "
                "be singular.")
        if self.is_square is False:
            raise NotImplementedError(
                "Exact solve not implemented for an operator that is expected to "
                "not be square.")
        if isinstance(rhs, linear_operator.LinearOperator):
            left_operator = self.adjoint() if adjoint else self
            right_operator = rhs.adjoint() if adjoint_arg else rhs

            if (right_operator.range_dimension is not None
                    and left_operator.domain_dimension is not None
                    and right_operator.range_dimension !=
                    left_operator.domain_dimension):
                raise ValueError(
                    "Operators are incompatible. Expected `rhs` to have dimension"
                    " {} but got {}.".format(left_operator.domain_dimension,
                                             right_operator.range_dimension))
            with self._name_scope(name):  # pylint: disable=not-callable
                return linear_operator_algebra.solve(left_operator,
                                                     right_operator)

        with self._name_scope(name):  # pylint: disable=not-callable
            block_dimensions = (self._block_domain_dimensions()
                                if adjoint else self._block_range_dimensions())
            arg_dim = -1 if adjoint_arg else -2
            blockwise_arg = linear_operator_util.arg_is_blockwise(
                block_dimensions, rhs, arg_dim)
            if blockwise_arg:
                for i, block in enumerate(rhs):
                    if not isinstance(block, linear_operator.LinearOperator):
                        block = ops.convert_to_tensor_v2_with_dispatch(block)
                        self._check_input_dtype(block)
                        block_dimensions[i].assert_is_compatible_with(
                            block.shape[arg_dim])
                        rhs[i] = block
                if adjoint_arg:
                    split_rhs = [linalg.adjoint(y) for y in rhs]
                else:
                    split_rhs = rhs

            else:
                rhs = ops.convert_to_tensor_v2_with_dispatch(rhs, name="rhs")
                self._check_input_dtype(rhs)
                op_dimension = (self.domain_dimension
                                if adjoint else self.range_dimension)
                op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])

                rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
                split_rhs = linear_operator_util.split_arg_into_blocks(
                    self._block_domain_dimensions(),
                    self._block_domain_dimension_tensors,
                    rhs,
                    axis=-2)

            solution_list = []
            if adjoint:
                # For an adjoint blockwise lower-triangular linear operator, the system
                # must be solved bottom to top. Iterate backwards over rows of the
                # adjoint (i.e. columns of the non-adjoint operator).
                for index in reversed(range(len(self.operators))):
                    y = split_rhs[index]
                    # Iterate top to bottom over the operators in the off-diagonal portion
                    # of the column-partition (i.e. row-partition of the adjoint), apply
                    # the operator to the respective block of the solution found in
                    # previous iterations, and subtract the result from the `rhs` block.
                    # For example, let `A`, `B`, and `D` be the linear operators in the top
                    # row-partition of the adjoint of
                    # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])`,
                    # and `x_1` and `x_2` be blocks of the solution found in previous
                    # iterations of the outer loop. The following loop (when `index == 0`)
                    # expresses
                    # `Ax_0 + Bx_1 + Dx_2 = y_0` as `Ax_0 = y_0*`, where
                    # `y_0* = y_0 - Bx_1 - Dx_2`.
                    for j in reversed(range(index + 1, len(self.operators))):
                        y = y - self.operators[j][index].matmul(
                            solution_list[len(self.operators) - 1 - j],
                            adjoint=adjoint)
                    # Continuing the example above, solve `Ax_0 = y_0*` for `x_0`.
                    solution_list.append(self._diagonal_operators[index].solve(
                        y, adjoint=adjoint))
                solution_list.reverse()
            else:
                # Iterate top to bottom over the row-partitions.
                for row, y in zip(self.operators, split_rhs):
                    # Iterate left to right over the operators in the off-diagonal portion
                    # of the row-partition, apply the operator to the block of the
                    # solution found in previous iterations, and subtract the result from
                    # the `rhs` block. For example, let `D`, `E`, and `F` be the linear
                    # operators in the bottom row-partition of
                    # `LinearOperatorBlockLowerTriangular([[A], [B, C], [D, E, F]])` and
                    # `x_0` and `x_1` be blocks of the solution found in previous
                    # iterations of the outer loop. The following loop
                    # (when `index == 2`) expresses
                    # `Dx_0 + Ex_1 + Fx_2 = y_2` as `Fx_2 = y_2*`, where
                    # `y_2* = y_2 - Dx_0 - Ex_1`.
                    for i, operator in enumerate(row[:-1]):
                        y = y - operator.matmul(solution_list[i],
                                                adjoint=adjoint)
                    # Continuing the example above, solve `Fx_2 = y_2*` for `x_2`.
                    solution_list.append(row[-1].solve(y, adjoint=adjoint))

            if blockwise_arg:
                return solution_list

            solution_list = linear_operator_util.broadcast_matrix_batch_dims(
                solution_list)
            return array_ops.concat(solution_list, axis=-2)
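
The forward-substitution recurrence described in the docstring can be checked with a small NumPy sketch; the 2-by-2 blockwise operator and all block shapes below are made up for illustration.

import numpy as np

rng = np.random.default_rng(0)
# Blockwise lower-triangular operator [[A00, 0], [A10, A11]].
A00 = rng.standard_normal((2, 2))
A10 = rng.standard_normal((3, 2))
A11 = rng.standard_normal((3, 3))
y = rng.standard_normal((5, 1))
y0, y1 = y[:2], y[2:]

# x_0 = A_00.solve(y_0)
x0 = np.linalg.solve(A00, y0)
# x_1 = A_11.solve(y_1 - A_10.matmul(x_0))
x1 = np.linalg.solve(A11, y1 - A10 @ x0)

# The concatenated blockwise solution matches a dense solve of the full system.
full = np.block([[A00, np.zeros((2, 3))], [A10, A11]])
x = np.concatenate([x0, x1], axis=-2)
np.testing.assert_allclose(full @ x, y, atol=1e-8)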
Code example #5
    def _matmul(self, x, adjoint=False, adjoint_arg=False):
        arg_dim = -1 if adjoint_arg else -2
        block_dimensions = (self._block_range_dimensions()
                            if adjoint else self._block_domain_dimensions())
        blockwise_arg = linear_operator_util.arg_is_blockwise(
            block_dimensions, x, arg_dim)
        if blockwise_arg:
            split_x = x
        else:
            split_dim = -1 if adjoint_arg else -2
            # Split input by columns if adjoint_arg is True, else by rows.
            split_x = linear_operator_util.split_arg_into_blocks(
                self._block_domain_dimensions(),
                self._block_domain_dimension_tensors,
                x,
                axis=split_dim)

        result_list = []
        # Iterate over row-partitions (i.e. column-partitions of the adjoint).
        if adjoint:
            for index in range(len(self.operators)):
                # Begin with the operator on the diagonal and apply it to the
                # respective `rhs` block.
                result = self.operators[index][index].matmul(
                    split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)

                # Iterate top to bottom over the operators in the remainder of the
                # column-partition (i.e. left to right over the row-partition of the
                # adjoint), apply the operator to the respective `rhs` block and
                # accumulate the sum. For example, given the
                # `LinearOperatorBlockLowerTriangular`:
                #
                # op = [[A, 0, 0],
                #       [B, C, 0],
                #       [D, E, F]]
                #
                # if `index = 1`, the following loop calculates:
                # `y_1 = (C.matmul(x_1, adjoint=adjoint) +
                #         E.matmul(x_2, adjoint=adjoint))`,
                # where `x_1` and `x_2` are splits of `x`.
                for j in range(index + 1, len(self.operators)):
                    result += self.operators[j][index].matmul(
                        split_x[j], adjoint=adjoint, adjoint_arg=adjoint_arg)
                result_list.append(result)
        else:
            for row in self.operators:
                # Begin with the left-most operator in the row-partition and apply it
                # to the first `rhs` block.
                result = row[0].matmul(split_x[0],
                                       adjoint=adjoint,
                                       adjoint_arg=adjoint_arg)
                # Iterate left to right over the operators in the remainder of the row
                # partition, apply the operator to the respective `rhs` block, and
                # accumulate the sum.
                for j, operator in enumerate(row[1:]):
                    result += operator.matmul(split_x[j + 1],
                                              adjoint=adjoint,
                                              adjoint_arg=adjoint_arg)
                result_list.append(result)

        if blockwise_arg:
            return result_list

        result_list = linear_operator_util.broadcast_matrix_batch_dims(
            result_list)
        return array_ops.concat(result_list, axis=-2)
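
To make the accumulation above concrete, here is a hedged usage sketch (operator values are illustrative; it assumes a TensorFlow version that ships `tf.linalg.LinearOperatorBlockLowerTriangular`).

import tensorflow as tf

A = tf.linalg.LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
B = tf.linalg.LinearOperatorFullMatrix([[2., 2.]])
C = tf.linalg.LinearOperatorFullMatrix([[3.]])
# op = [[A, 0],
#       [B, C]]
op = tf.linalg.LinearOperatorBlockLowerTriangular([[A], [B, C]])

x = tf.constant([[1.], [2.], [3.]])
# Row-partition 0: y_0 = A x_0 = [[1.], [2.]]
# Row-partition 1: y_1 = B x_0 + C x_1 = [[2. + 4. + 9.]] = [[15.]]
y = op.matmul(x)  # [[1.], [2.], [15.]]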
Code example #6
    def solve(self, rhs, adjoint=False, adjoint_arg=False, name="solve"):
        """Solve (exact or approx) `R` (batch) systems of equations: `A X = rhs`.

    The returned `Tensor` will be close to an exact solution if `A` is well
    conditioned. Otherwise closeness will vary. See class docstring for details.

    Examples:

    ```python
    # Make an operator acting like batch matrix A.  Assume A.shape = [..., M, N]
    operator = LinearOperator(...)
    operator.shape = [..., M, N]

    # Solve R > 0 linear systems for every member of the batch.
    RHS = ... # shape [..., M, R]

    X = operator.solve(RHS)
    # X[..., :, r] is the solution to the r'th linear system
    # sum_j A[..., :, j] X[..., j, r] = RHS[..., :, r]

    operator.matmul(X)
    ==> RHS
    ```

    Args:
      rhs: `Tensor` with same `dtype` as this operator and compatible shape,
        or a list of `Tensor`s (for blockwise operators). `Tensor`s are treated
        like [batch] matrices, meaning that for every set of leading dimensions,
        the last two dimensions define a matrix.
        See class docstring for definition of compatibility.
      adjoint: Python `bool`.  If `True`, solve the system involving the adjoint
        of this `LinearOperator`:  `A^H X = rhs`.
      adjoint_arg:  Python `bool`.  If `True`, solve `A X = rhs^H` where `rhs^H`
        is the hermitian transpose (transposition and complex conjugation).
      name:  A name scope to use for ops added by this method.

    Returns:
      `Tensor` with shape `[..., N, R]` and same `dtype` as `rhs`.

    Raises:
      NotImplementedError:  If `self.is_non_singular` or `is_square` is False.
    """
        if self.is_non_singular is False:
            raise NotImplementedError(
                "Exact solve not implemented for an operator that is expected to "
                "be singular.")
        if self.is_square is False:
            raise NotImplementedError(
                "Exact solve not implemented for an operator that is expected to "
                "not be square.")
        if isinstance(rhs, linear_operator.LinearOperator):
            left_operator = self.adjoint() if adjoint else self
            right_operator = rhs.adjoint() if adjoint_arg else rhs

            if (right_operator.range_dimension is not None
                    and left_operator.domain_dimension is not None
                    and right_operator.range_dimension !=
                    left_operator.domain_dimension):
                raise ValueError(
                    "Operators are incompatible. Expected `rhs` to have dimension"
                    " {} but got {}.".format(left_operator.domain_dimension,
                                             right_operator.range_dimension))
            with self._name_scope(name):
                return linear_operator_algebra.solve(left_operator,
                                                     right_operator)

        with self._name_scope(name):
            block_dimensions = (self._block_domain_dimensions()
                                if adjoint else self._block_range_dimensions())
            arg_dim = -1 if adjoint_arg else -2
            blockwise_arg = linear_operator_util.arg_is_blockwise(
                block_dimensions, rhs, arg_dim)

            if blockwise_arg:
                split_rhs = rhs
                for i, block in enumerate(split_rhs):
                    if not isinstance(block, linear_operator.LinearOperator):
                        block = ops.convert_to_tensor_v2_with_dispatch(block)
                        self._check_input_dtype(block)
                        block_dimensions[i].assert_is_compatible_with(
                            block.shape[arg_dim])
                        split_rhs[i] = block
            else:
                rhs = ops.convert_to_tensor_v2_with_dispatch(rhs, name="rhs")
                self._check_input_dtype(rhs)
                op_dimension = (self.domain_dimension
                                if adjoint else self.range_dimension)
                op_dimension.assert_is_compatible_with(rhs.shape[arg_dim])
                split_dim = -1 if adjoint_arg else -2
                # Split input by columns if adjoint_arg is True, else by rows.
                split_rhs = linear_operator_util.split_arg_into_blocks(
                    self._block_domain_dimensions(),
                    self._block_domain_dimension_tensors,
                    rhs,
                    axis=split_dim)

            solution_list = []
            for index, operator in enumerate(self.operators):
                solution_list += [
                    operator.solve(split_rhs[index],
                                   adjoint=adjoint,
                                   adjoint_arg=adjoint_arg)
                ]

            if blockwise_arg:
                return solution_list

            solution_list = linear_operator_util.broadcast_matrix_batch_dims(
                solution_list)
            return array_ops.concat(solution_list, axis=-2)
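
Finally, a hedged sketch of the blockwise `solve` path for a block-diagonal operator (illustrative values; assumes the installed TensorFlow supports list-valued blockwise arguments).

import tensorflow as tf

op1 = tf.linalg.LinearOperatorFullMatrix([[2., 0.], [0., 4.]],
                                         is_non_singular=True)
op2 = tf.linalg.LinearOperatorFullMatrix([[5.]], is_non_singular=True)
block_diag = tf.linalg.LinearOperatorBlockDiag([op1, op2])

# Dense rhs: a single Tensor; the solution is a Tensor.
rhs = tf.constant([[2.], [8.], [10.]])
x = block_diag.solve(rhs)  # [[1.], [2.], [2.]]

# Blockwise rhs: one Tensor per block; the solution is a list, one entry
# solved per diagonal block, mirroring the early `return solution_list` above.
rhs_blocks = [rhs[:2], rhs[2:]]
x_blocks = block_diag.solve(rhs_blocks)  # shapes [2, 1] and [1, 1]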