def _maximal_eigenvector_power_method(matrix,
                                      epsilon=1e-6,
                                      maximum_iterations=100):
  """Returns the maximal right-eigenvector of `matrix` using the power method.

  Args:
    matrix: 2D Tensor, the matrix of which we will find the maximal
      right-eigenvector.
    epsilon: positive float, if two iterations of the power method differ (in
      L2 norm) by no more than epsilon, we will terminate.
    maximum_iterations: positive int, if we perform this many iterations, we
      will terminate.

  Returns:
    The maximal right-eigenvector of `matrix`.

  Raises:
    ValueError: If the `matrix` tensor is not floating-point, or if the
      `epsilon` or `maximum_iterations` parameters violate their bounds.
  """
  if not matrix.dtype.is_floating:
    # Name the actual offending argument (the old message referred to
    # "multipliers", which is not a parameter of this function).
    raise ValueError("matrix must have a floating-point dtype")
  if epsilon <= 0.0:
    raise ValueError("epsilon must be strictly positive")
  if maximum_iterations <= 0:
    raise ValueError("maximum_iterations must be strictly positive")

  def while_loop_condition(iteration, eigenvector, old_eigenvector):
    """Returns false if the while loop should terminate."""
    not_done = (iteration < maximum_iterations)
    not_converged = (standard_ops.norm(eigenvector - old_eigenvector) > epsilon)
    return standard_ops.logical_and(not_done, not_converged)

  def while_loop_body(iteration, eigenvector, old_eigenvector):
    """Performs one iteration of the power method."""
    del old_eigenvector  # Needed by the condition, but not the body.
    iteration += 1
    # We need to use tf.matmul() and tf.expand_dims(), instead of
    # tf.tensordot(), since the former will infer the shape of the result, while
    # the latter will not (tf.while_loop() needs the shapes).
    new_eigenvector = standard_ops.matmul(
        matrix, standard_ops.expand_dims(eigenvector, 1))[:, 0]
    new_eigenvector /= standard_ops.norm(new_eigenvector)
    return (iteration, new_eigenvector, eigenvector)

  iteration = standard_ops.constant(0)
  # Start from the (normalized) all-ones vector, which has a nonzero
  # component along any eigenvector with nonnegative entries.
  eigenvector = standard_ops.ones_like(matrix[:, 0])
  eigenvector /= standard_ops.norm(eigenvector)

  # We actually want a do-while loop, so we explicitly call while_loop_body()
  # once before tf.while_loop().
  iteration, eigenvector, old_eigenvector = while_loop_body(
      iteration, eigenvector, eigenvector)
  iteration, eigenvector, old_eigenvector = control_flow_ops.while_loop(
      while_loop_condition,
      while_loop_body,
      loop_vars=(iteration, eigenvector, old_eigenvector),
      name="power_method")

  return eigenvector
# Example #2: duplicate of the function above, reformatted with 4-space
# indentation.
def _maximal_eigenvector_power_method(matrix,
                                      epsilon=1e-6,
                                      maximum_iterations=100):
    """Returns the maximal right-eigenvector of `matrix` using the power method.

    Args:
      matrix: 2D Tensor, the matrix of which we will find the maximal
        right-eigenvector.
      epsilon: positive float, if two iterations of the power method differ (in
        L2 norm) by no more than epsilon, we will terminate.
      maximum_iterations: positive int, if we perform this many iterations, we
        will terminate.

    Returns:
      The maximal right-eigenvector of `matrix`.

    Raises:
      ValueError: If the `matrix` tensor is not floating-point, or if the
        `epsilon` or `maximum_iterations` parameters violate their bounds.
    """
    if not matrix.dtype.is_floating:
        # Name the actual offending argument (the old message referred to
        # "multipliers", which is not a parameter of this function).
        raise ValueError("matrix must have a floating-point dtype")
    if epsilon <= 0.0:
        raise ValueError("epsilon must be strictly positive")
    if maximum_iterations <= 0:
        raise ValueError("maximum_iterations must be strictly positive")

    def while_loop_condition(iteration, eigenvector, old_eigenvector):
        """Returns false if the while loop should terminate."""
        not_done = (iteration < maximum_iterations)
        not_converged = (standard_ops.norm(eigenvector - old_eigenvector) >
                         epsilon)
        return standard_ops.logical_and(not_done, not_converged)

    def while_loop_body(iteration, eigenvector, old_eigenvector):
        """Performs one iteration of the power method."""
        del old_eigenvector  # Needed by the condition, but not the body.
        iteration += 1
        # We need to use tf.matmul() and tf.expand_dims(), instead of
        # tf.tensordot(), since the former will infer the shape of the result,
        # while the latter will not (tf.while_loop() needs the shapes).
        new_eigenvector = standard_ops.matmul(
            matrix, standard_ops.expand_dims(eigenvector, 1))[:, 0]
        new_eigenvector /= standard_ops.norm(new_eigenvector)
        return (iteration, new_eigenvector, eigenvector)

    iteration = standard_ops.constant(0)
    # Start from the (normalized) all-ones vector, which has a nonzero
    # component along any eigenvector with nonnegative entries.
    eigenvector = standard_ops.ones_like(matrix[:, 0])
    eigenvector /= standard_ops.norm(eigenvector)

    # We actually want a do-while loop, so we explicitly call while_loop_body()
    # once before tf.while_loop().
    iteration, eigenvector, old_eigenvector = while_loop_body(
        iteration, eigenvector, eigenvector)
    iteration, eigenvector, old_eigenvector = control_flow_ops.while_loop(
        while_loop_condition,
        while_loop_body,
        loop_vars=(iteration, eigenvector, old_eigenvector),
        name="power_method")

    return eigenvector
 def while_loop_body(iteration, eigenvector, old_eigenvector):
   """Runs a single power-method step and L2-normalizes the new iterate."""
   # The previous iterate is consumed only by the loop condition.
   del old_eigenvector
   # Multiply as an explicit column vector via matmul()/expand_dims() (rather
   # than tensordot()) so the result's static shape is inferred, which
   # tf.while_loop() requires.
   candidate = standard_ops.matmul(
       matrix, standard_ops.expand_dims(eigenvector, 1))[:, 0]
   normalized = candidate / standard_ops.norm(candidate)
   return (iteration + 1, normalized, eigenvector)
 def while_loop_body(iteration, eigenvector, old_eigenvector):
   """Performs one power-method update on `eigenvector`."""
   del old_eigenvector  # Only the loop condition needs the previous iterate.
   next_iteration = iteration + 1
   # matmul() + expand_dims() infer the output's static shape, unlike
   # tensordot(); tf.while_loop() needs those shapes.
   product = standard_ops.matmul(
       matrix, standard_ops.expand_dims(eigenvector, 1))
   updated = product[:, 0]
   updated = updated / standard_ops.norm(updated)
   return (next_iteration, updated, eigenvector)
 def while_loop_condition(iteration, eigenvector, old_eigenvector):
   """Returns a boolean Tensor that is True while the loop should continue."""
   # Keep iterating only while we are under the iteration budget AND the last
   # step moved the iterate by more than epsilon (in L2 norm).
   return standard_ops.logical_and(
       iteration < maximum_iterations,
       standard_ops.norm(eigenvector - old_eigenvector) > epsilon)
 def while_loop_condition(iteration, eigenvector, old_eigenvector):
   """Decides whether the power-method loop should run another step."""
   within_budget = iteration < maximum_iterations
   step_size = standard_ops.norm(eigenvector - old_eigenvector)
   # Terminate once the iteration budget is exhausted or the iterate has
   # converged to within epsilon.
   return standard_ops.logical_and(within_budget, step_size > epsilon)