Code Example #1
    def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
        sess = ops.get_default_session()
        shape = list(shape)

        # Test only the case of 2 matrices.
        # The Square test uses either 1 or 2, so we have tested the case of 1 matrix
        # sufficiently.
        num_operators = 2

        # Create 2 matrices/operators, A1, A2, whose composition becomes A = A1 A2.
        # Use inner dimension of 2.
        k = 2
        batch_shape = shape[:-2]
        shape_1 = batch_shape + [shape[-2], k]
        shape_2 = batch_shape + [k, shape[-1]]

        matrices = [
            linear_operator_test_util.random_normal(shape_1, dtype=dtype),
            linear_operator_test_util.random_normal(shape_2, dtype=dtype)
        ]

        if use_placeholder:
            matrices_ph = [
                array_ops.placeholder(dtype=dtype)
                for _ in range(num_operators)
            ]
            # Evaluate here because (i) you cannot feed a tensor, and (ii)
            # values are random and we want the same value used for both mat and
            # feed_dict.
            matrices = sess.run(matrices)
            operator = linalg.LinearOperatorComposition([
                linalg.LinearOperatorFullMatrix(m_ph) for m_ph in matrices_ph
            ])
            feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
        else:
            operator = linalg.LinearOperatorComposition(
                [linalg.LinearOperatorFullMatrix(m) for m in matrices])
            feed_dict = None

        # Convert back to Tensor.  Needed if use_placeholder, since then we have
        # already evaluated each matrix to a numpy array.
        apply_order_list = list(reversed(matrices))
        mat = ops.convert_to_tensor(apply_order_list[0])
        for other_mat in apply_order_list[1:]:
            mat = math_ops.matmul(other_mat, mat)

        return operator, mat, feed_dict
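
For reference, a minimal usage sketch (not taken from the test suite) of the property this fixture feeds into: composing two LinearOperatorFullMatrix instances behaves like the explicit product A = A1 A2. It assumes a TensorFlow 1.x release where the operators are exposed under tf.linalg (older releases kept them in tf.contrib.linalg).

import numpy as np
import tensorflow as tf

a1 = np.random.randn(3, 2).astype(np.float64)
a2 = np.random.randn(2, 4).astype(np.float64)

# Composition applies the factors in order, so the dense form is A1 @ A2.
composed = tf.linalg.LinearOperatorComposition([
    tf.linalg.LinearOperatorFullMatrix(a1),
    tf.linalg.LinearOperatorFullMatrix(a2)
])

with tf.Session() as sess:
  dense, product = sess.run([composed.to_dense(), tf.matmul(a1, a2)])
  np.testing.assert_allclose(dense, product)
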
Code Example #2
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    sess = ops.get_default_session()
    shape = list(shape)

    # Test only the case of 2 matrices.
    # The Square test uses either 1 or 2, so we have tested the case of 1 matrix
    # sufficiently.
    num_operators = 2

    # Create 2 matrices/operators, A1, A2, whose composition becomes A = A1 A2.
    # Use inner dimension of 2.
    k = 2
    batch_shape = shape[:-2]
    shape_1 = batch_shape + [shape[-2], k]
    shape_2 = batch_shape + [k, shape[-1]]

    matrices = [
        linear_operator_test_util.random_normal(shape_1, dtype=dtype),
        linear_operator_test_util.random_normal(shape_2, dtype=dtype)
    ]

    if use_placeholder:
      matrices_ph = [
          array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
      ]
      # Evaluate here because (i) you cannot feed a tensor, and (ii)
      # values are random and we want the same value used for both mat and
      # feed_dict.
      matrices = sess.run(matrices)
      operator = linalg.LinearOperatorComposition(
          [linalg.LinearOperatorFullMatrix(m_ph) for m_ph in matrices_ph])
      feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
    else:
      operator = linalg.LinearOperatorComposition(
          [linalg.LinearOperatorFullMatrix(m) for m in matrices])
      feed_dict = None

    # Convert back to Tensor.  Needed if use_placeholder, since then we have
    # already evaluated each matrix to a numpy array.
    apply_order_list = list(reversed(matrices))
    mat = ops.convert_to_tensor(apply_order_list[0])
    for other_mat in apply_order_list[1:]:
      mat = math_ops.matmul(other_mat, mat)

    return operator, mat, feed_dict
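
The use_placeholder branch above relies on the standard TF1 placeholder / feed_dict pattern. A stripped-down sketch of that pattern outside the test harness (illustrative values, TensorFlow 1.x assumed):

import numpy as np
import tensorflow as tf

m_ph = tf.placeholder(dtype=tf.float64)        # shape left unspecified, as in the tests
operator = tf.linalg.LinearOperatorFullMatrix(m_ph)

m_val = np.random.randn(4, 4)                  # concrete value generated up front
with tf.Session() as sess:
  # The same numpy value serves both as the expected result and as the feed
  # for the placeholder, which is why the tests call sess.run on the random
  # matrices before building feed_dict.
  result = sess.run(operator.to_dense(), feed_dict={m_ph: m_val})
  np.testing.assert_allclose(result, m_val)
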
Code Example #3
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    matrix = linear_operator_test_util.random_normal(shape, dtype=dtype)
    if use_placeholder:
      matrix_ph = array_ops.placeholder(dtype=dtype)
      # Evaluate here because (i) you cannot feed a tensor, and (ii)
      # values are random and we want the same value used for both mat and
      # feed_dict.
      matrix = matrix.eval()
      operator = linalg.LinearOperatorFullMatrix(matrix_ph)
      feed_dict = {matrix_ph: matrix}
    else:
      operator = linalg.LinearOperatorFullMatrix(matrix)
      feed_dict = None

    # Convert back to Tensor.  Needed if use_placeholder, since then we have
    # already evaluated matrix to a numpy array.
    mat = ops.convert_to_tensor(matrix)

    return operator, mat, feed_dict
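
A rough sketch (illustrative only, TensorFlow 1.x assumed) of the equivalence this fixture is used to check: the operator's matmul should agree with a dense matmul against the returned mat.

import numpy as np
import tensorflow as tf

matrix = np.random.randn(3, 3)
x = np.random.randn(3, 2)

operator = tf.linalg.LinearOperatorFullMatrix(matrix)

with tf.Session() as sess:
  via_operator, via_dense = sess.run(
      [operator.matmul(x), tf.matmul(matrix, x)])
  np.testing.assert_allclose(via_operator, via_dense)
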
Code Example #4
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    # Recall A = L + UDV^H
    shape = list(shape)
    diag_shape = shape[:-1]
    k = shape[-2] // 2 + 1
    u_perturbation_shape = shape[:-1] + [k]
    diag_perturbation_shape = shape[:-2] + [k]

    # base_operator L will be a symmetric positive definite diagonal linear
    # operator, with condition number as high as 1e4.
    base_diag = linear_operator_test_util.random_uniform(
        diag_shape, minval=1e-4, maxval=1., dtype=dtype)
    base_diag_ph = array_ops.placeholder(dtype=dtype)

    # U
    u = linear_operator_test_util.random_normal_correlated_columns(
        u_perturbation_shape, dtype=dtype)
    u_ph = array_ops.placeholder(dtype=dtype)

    # V
    v = linear_operator_test_util.random_normal_correlated_columns(
        u_perturbation_shape, dtype=dtype)
    v_ph = array_ops.placeholder(dtype=dtype)

    # D
    if self._is_diag_positive:
      diag_perturbation = linear_operator_test_util.random_uniform(
          diag_perturbation_shape, minval=1e-4, maxval=1., dtype=dtype)
    else:
      diag_perturbation = linear_operator_test_util.random_normal(
          diag_perturbation_shape, stddev=1e-4, dtype=dtype)
    diag_perturbation_ph = array_ops.placeholder(dtype=dtype)

    if use_placeholder:
      # Evaluate here because (i) you cannot feed a tensor, and (ii)
      # values are random and we want the same value used for both mat and
      # feed_dict.
      base_diag = base_diag.eval()
      u = u.eval()
      v = v.eval()
      diag_perturbation = diag_perturbation.eval()

      # In all cases, set base_operator to be positive definite.
      base_operator = linalg.LinearOperatorDiag(
          base_diag_ph, is_positive_definite=True)

      operator = linalg.LinearOperatorUDVHUpdate(
          base_operator,
          u=u_ph,
          v=v_ph if self._use_v else None,
          diag=diag_perturbation_ph if self._use_diag_perturbation else None,
          is_diag_positive=self._is_diag_positive)
      feed_dict = {
          base_diag_ph: base_diag,
          u_ph: u,
          v_ph: v,
          diag_perturbation_ph: diag_perturbation}
    else:
      base_operator = linalg.LinearOperatorDiag(
          base_diag, is_positive_definite=True)
      operator = linalg.LinearOperatorUDVHUpdate(
          base_operator,
          u,
          v=v if self._use_v else None,
          diag=diag_perturbation if self._use_diag_perturbation else None,
          is_diag_positive=self._is_diag_positive)
      feed_dict = None

    # The matrix representing L
    base_diag_mat = array_ops.matrix_diag(base_diag)

    # The matrix representing D
    diag_perturbation_mat = array_ops.matrix_diag(diag_perturbation)

    # Set up mat as some variant of A = L + UDV^H
    if self._use_v and self._use_diag_perturbation:
      # In this case, we have L + UDV^H and it isn't symmetric.
      expect_use_cholesky = False
      mat = base_diag_mat + math_ops.matmul(
          u, math_ops.matmul(diag_perturbation_mat, v, adjoint_b=True))
    elif self._use_v:
      # In this case, we have L + UDV^H and it isn't symmetric.
      expect_use_cholesky = False
      mat = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True)
    elif self._use_diag_perturbation:
      # In this case, we have L + UDU^H, which is PD if D > 0, since L > 0.
      expect_use_cholesky = self._is_diag_positive
      mat = base_diag_mat + math_ops.matmul(
          u, math_ops.matmul(diag_perturbation_mat, u, adjoint_b=True))
    else:
      # In this case, we have L + UU^H, which is PD since L > 0.
      expect_use_cholesky = True
      mat = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True)

    if expect_use_cholesky:
      self.assertTrue(operator._use_cholesky)
    else:
      self.assertFalse(operator._use_cholesky)

    return operator, mat, feed_dict
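
For context, a NumPy-only sketch of the A = L + U D V^H structure exercised above, and of why the D > 0, V = U case is expected to use a Cholesky factorization. LinearOperatorUDVHUpdate is the tf.contrib.linalg name; later TensorFlow releases expose an equivalent operator (to the best of my knowledge, tf.linalg.LinearOperatorLowRankUpdate), so the sketch builds the dense matrices directly rather than assuming either API.

import numpy as np

n, k = 5, 2
base_diag = np.random.uniform(1e-4, 1., size=n)   # L: positive diagonal entries
u = np.random.randn(n, k)                         # U
v = np.random.randn(n, k)                         # V
d = np.random.uniform(1e-4, 1., size=k)           # D: diagonal perturbation, D > 0

# General case: A = L + U D V^H is not symmetric, so no Cholesky is expected.
a = np.diag(base_diag) + u @ np.diag(d) @ v.conj().T

# With V = U and D > 0, A = L + U D U^H is symmetric positive definite
# (since L > 0), which is why the test asserts operator._use_cholesky.
a_pd = np.diag(base_diag) + u @ np.diag(d) @ u.conj().T
assert np.all(np.linalg.eigvalsh(a_pd) > 0)
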