Code example #1
def _get_initial_state(value_and_gradients_function, initial_position,
                       num_correction_pairs, tolerance):
    """Create LBfgsOptimizerResults with initial state of search procedure."""
    init_args = bfgs_utils.get_initial_state_args(value_and_gradients_function,
                                                  initial_position, tolerance)
    empty_queue = _make_empty_queue_for(num_correction_pairs, initial_position)
    init_args.update(position_deltas=empty_queue, gradient_deltas=empty_queue)
    return LBfgsOptimizerResults(**init_args)
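This snippet builds the L-BFGS memory with `_make_empty_queue_for`, which is not shown on this page. A minimal sketch of what such a helper could look like, assuming the correction-pair queues are zero-initialized tensors with a leading axis of length `num_correction_pairs` (a reconstruction, not the project's actual code):

```python
import tensorflow as tf

def _make_empty_queue_for(k, element):
    """Returns a zero-filled tensor that can hold `k` tensors shaped like `element`."""
    # Prepend a queue axis of length `k` to the element's shape, so that
    # queue[i] has the same shape and dtype as `element`.
    queue_shape = tf.concat([[k], tf.shape(element)], axis=0)
    return tf.zeros(queue_shape, dtype=element.dtype.base_dtype)
```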
Code example #2
File: bfgs.py Project: suryansh2020/mynlp
def minimize(value_and_gradients_function,
             initial_position,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             stopping_condition=None,
             name=None):
  """Applies the BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1].

  ### Usage:

  The following example demonstrates the BFGS optimizer attempting to find the
  minimum of a simple two-dimensional quadratic objective function.

  ```python
    minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
    scales = np.array([2.0, 3.0])  # The scales along the two axes.

    # The objective function and the gradient.
    def quadratic(x):
      value = tf.reduce_sum(scales * (x - minimum) ** 2)
      return value, tf.gradients(value, x)[0]

    start = tf.constant([0.6, 0.8])  # Starting point for the search.
    optim_results = tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8)

    with tf.Session() as session:
      results = session.run(optim_results)
      # Check that the search converged
      assert(results.converged)
      # Check that the argmin is close to the actual value.
      np.testing.assert_allclose(results.position, minimum)
      # Print out the total number of function evaluations it took. Should be 6.
      print ("Function evaluations: %d" % results.num_objective_evaluations)
  ```

  ### References:
  [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp. 136-140. 2006.
    http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function:  A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The input should be of shape `[..., n]`, where `n` is
      the size of the domain of input points, and all others are batching
      dimensions. The first component of the return value should be a real
      `Tensor` of matching shape `[...]`. The second component (the gradient)
      should also be of shape `[..., n]` like the input value to the function.
    initial_position: real `Tensor` of shape `[..., n]`. The starting point, or
      points when using batching dimensions, of the search procedure. At these
      points the function value and the gradient norm should be finite.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change
      in the objective value between one iteration and the next is smaller
      than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype
      as the components of the output of the `value_and_gradients_function`.
      If specified, the shape should be broadcastable to `[..., n, n]`; e.g.,
      if a single `[n, n]` matrix is provided, it will be automatically
      broadcast to all batch members. Alternatively, one can specify a
      different Hessian estimate for each batch member.
      For the correctness of the algorithm, it is required that this parameter
      be symmetric and positive definite. Specifies the starting estimate for
      the inverse of the Hessian at the initial point. If not specified,
      the identity matrix is used as the starting estimate for the
      inverse Hessian.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed to
      run in parallel.
    stopping_condition: (Optional) A Python function that takes as input two
      Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor.
      The input tensors are `converged` and `failed`, indicating the current
      status of each respective batch member; the return value states whether
      the algorithm should stop. The default is `tfp.optimizer.converged_all`,
      which stops only when all batch members have either converged or failed.
      An alternative is `tfp.optimizer.converged_any`, which stops as soon as
      one batch member has converged, or when all have failed.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: boolean tensor of shape `[...]` indicating for each batch
        member whether the minimum was found within tolerance.
      failed:  boolean tensor of shape `[...]` indicating for each batch
        member whether a line search step failed to find a suitable step size
        satisfying Wolfe conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor of shape `[..., n]` containing the last argument value
        found during the search from each starting point. If the search
        converged, then this value is the argmin of the objective function.
      objective_value: A tensor of shape `[...]` with the value of the
        objective function at the `position`. If the search converged, then
        this is the (local) minimum of the objective function.
      objective_gradient: A tensor of shape `[..., n]` containing the gradient
        of the objective function at the `position`. If the search converged
        the max-norm of this tensor should be below the tolerance.
      inverse_hessian_estimate: A tensor of shape `[..., n, n]` containing the
        inverse of the estimated Hessian.
  """
  with tf.compat.v1.name_scope(
      name, 'minimize',
      [initial_position, tolerance, initial_inverse_hessian_estimate]):
    initial_position = tf.convert_to_tensor(
        value=initial_position, name='initial_position')
    dtype = initial_position.dtype.base_dtype
    tolerance = tf.convert_to_tensor(
        value=tolerance, dtype=dtype, name='grad_tolerance')
    f_relative_tolerance = tf.convert_to_tensor(
        value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')
    x_tolerance = tf.convert_to_tensor(
        value=x_tolerance, dtype=dtype, name='x_tolerance')
    max_iterations = tf.convert_to_tensor(
        value=max_iterations, name='max_iterations')

    input_shape = distribution_util.prefer_static_shape(initial_position)
    batch_shape, domain_size = input_shape[:-1], input_shape[-1]

    if stopping_condition is None:
      stopping_condition = bfgs_utils.converged_all

    # Control inputs are an optional list of tensors to evaluate before
    # the start of the search procedure. These can be used to assert the
    # validity of inputs to the search procedure.
    control_inputs = None

    if initial_inverse_hessian_estimate is None:
      # Create a default initial inverse Hessian.
      initial_inv_hessian = tf.eye(domain_size,
                                   batch_shape=batch_shape,
                                   dtype=dtype,
                                   name='initial_inv_hessian')
    else:
      # If an initial inverse Hessian is supplied, compute some control inputs
      # to ensure that it is positive definite and symmetric.
      initial_inv_hessian = tf.convert_to_tensor(
          value=initial_inverse_hessian_estimate,
          dtype=dtype,
          name='initial_inv_hessian')
      control_inputs = _inv_hessian_control_inputs(initial_inv_hessian)
      hessian_shape = tf.concat([batch_shape, [domain_size, domain_size]], 0)
      initial_inv_hessian = tf.broadcast_to(initial_inv_hessian, hessian_shape)

    # The `state` here is a `BfgsOptimizerResults` tuple with values for the
    # current state of the algorithm computation.
    def _cond(state):
      """Continue if iterations remain and stopping condition is not met."""
      return ((state.num_iterations < max_iterations) &
              tf.logical_not(stopping_condition(state.converged, state.failed)))

    def _body(state):
      """Main optimization loop."""
      search_direction = _get_search_direction(state.inverse_hessian_estimate,
                                               state.objective_gradient)
      derivative_at_start_pt = tf.reduce_sum(
          input_tensor=state.objective_gradient * search_direction, axis=-1)

      # If the derivative at the start point is not negative, recompute the
      # search direction with the initial inverse Hessian.
      needs_reset = (~state.failed & ~state.converged &
                     (derivative_at_start_pt >= 0))

      search_direction_reset = _get_search_direction(
          initial_inv_hessian, state.objective_gradient)

      actual_search_direction = tf.compat.v1.where(needs_reset,
                                                   search_direction_reset,
                                                   search_direction)
      actual_inv_hessian = tf.compat.v1.where(needs_reset, initial_inv_hessian,
                                              state.inverse_hessian_estimate)

      # Replace the hessian estimate in the state, in case it had to be reset.
      current_state = bfgs_utils.update_fields(
          state, inverse_hessian_estimate=actual_inv_hessian)

      next_state = bfgs_utils.line_search_step(
          current_state,
          value_and_gradients_function, actual_search_direction,
          tolerance, f_relative_tolerance, x_tolerance, stopping_condition)

      # Update the inverse Hessian if needed and continue.
      return [_update_inv_hessian(current_state, next_state)]

    kwargs = bfgs_utils.get_initial_state_args(
        value_and_gradients_function,
        initial_position,
        tolerance,
        control_inputs)
    kwargs['inverse_hessian_estimate'] = initial_inv_hessian
    initial_state = BfgsOptimizerResults(**kwargs)
    return tf.while_loop(
        cond=_cond,
        body=_body,
        loop_vars=[initial_state],
        parallel_iterations=parallel_iterations)[0]
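The docstring above describes batching dimensions, but its usage example runs only a single search. Below is a hedged sketch of a batched call under the same TF1-style conventions; the `tfp.optimizer.bfgs_minimize` entry point, `minimum`, and `scales` come from the docstring, while the two starting points are illustrative additions:

```python
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
scales = np.array([2.0, 3.0])   # The scales along the two axes.

def quadratic(x):
  # `x` has shape [batch, 2]; reduce over the domain axis only, so the
  # value has shape [batch] as the docstring requires.
  value = tf.reduce_sum(scales * (x - minimum) ** 2, axis=-1)
  return value, tf.gradients(value, x)[0]

# Two independent searches batched along the leading axis.
starts = tf.constant([[0.6, 0.8],
                      [-1.0, 2.0]])
# `stopping_condition=tfp.optimizer.converged_any` would instead stop the
# loop as soon as a single batch member converges.
optim_results = tfp.optimizer.bfgs_minimize(
    quadratic, initial_position=starts, tolerance=1e-8)

with tf.Session() as session:
  results = session.run(optim_results)
  print(results.converged)  # Shape [2]: one flag per batch member.
  print(results.position)   # Each row should be close to `minimum`.
```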
Code example #3
File: bfgs.py Project: zhouyonglong/probability
def minimize(value_and_gradients_function,
             initial_position,
             tolerance=1e-8,
             x_tolerance=0,
             f_relative_tolerance=0,
             initial_inverse_hessian_estimate=None,
             max_iterations=50,
             parallel_iterations=1,
             name=None):
    """Applies the BFGS algorithm to minimize a differentiable function.

  Performs unconstrained minimization of a differentiable function using the
  BFGS scheme. For details of the algorithm, see [Nocedal and Wright(2006)][1].

  ### Usage:

  The following example demonstrates the BFGS optimizer attempting to find the
  minimum of a simple two-dimensional quadratic objective function.

  ```python
    minimum = np.array([1.0, 1.0])  # The center of the quadratic bowl.
    scales = np.array([2.0, 3.0])  # The scales along the two axes.

    # The objective function and the gradient.
    def quadratic(x):
      value = tf.reduce_sum(scales * (x - minimum) ** 2)
      return value, tf.gradients(value, x)[0]

    start = tf.constant([0.6, 0.8])  # Starting point for the search.
    optim_results = tfp.optimizer.bfgs_minimize(
        quadratic, initial_position=start, tolerance=1e-8)

    with tf.Session() as session:
      results = session.run(optim_results)
      # Check that the search converged
      assert(results.converged)
      # Check that the argmin is close to the actual value.
      np.testing.assert_allclose(results.position, minimum)
      # Print out the total number of function evaluations it took. Should be 6.
      print ("Function evaluations: %d" % results.num_objective_evaluations)
  ```

  ### References:
  [1]: Jorge Nocedal, Stephen Wright. Numerical Optimization. Springer Series in
    Operations Research. pp. 136-140. 2006.
    http://pages.mtu.edu/~struther/Courses/OLD/Sp2013/5630/Jorge_Nocedal_Numerical_optimization_267490.pdf

  Args:
    value_and_gradients_function:  A Python callable that accepts a point as a
      real `Tensor` and returns a tuple of `Tensor`s of real dtype containing
      the value of the function and its gradient at that point. The function
      to be minimized. The first component of the return value should be a
      real scalar `Tensor`. The second component (the gradient) should have the
      same shape as the input value to the function.
    initial_position: `Tensor` of real dtype. The starting point of the search
      procedure. Should be a point at which the function value and the gradient
      norm are finite.
    tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance
      for the procedure. If the supremum norm of the gradient vector is below
      this number, the algorithm is stopped.
    x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the
      position between one iteration and the next is smaller than this number,
      the algorithm is stopped.
    f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change
      in the objective value between one iteration and the next is smaller
      than this value, the algorithm is stopped.
    initial_inverse_hessian_estimate: Optional `Tensor` of the same dtype
      as the components of the output of the `value_and_gradients_function`.
      If specified, the shape should be `initial_position.shape * 2`, i.e. the
      shape of `initial_position` concatenated with itself.
      For example, if the shape of `initial_position` is `[n]`, then the
      acceptable shape of `initial_inverse_hessian_estimate` is a square
      matrix of shape `[n, n]`.
      If the shape of `initial_position` is `[n, m]`, then the required shape
      is `[n, m, n, m]`.
      For the correctness of the algorithm, it is required that this parameter
      be symmetric and positive definite. Specifies the starting estimate for
      the inverse of the Hessian at the initial point. If not specified,
      the identity matrix is used as the starting estimate for the
      inverse Hessian.
    max_iterations: Scalar positive int32 `Tensor`. The maximum number of
      iterations for BFGS updates.
    parallel_iterations: Positive integer. The number of iterations allowed to
      run in parallel.
    name: (Optional) Python str. The name prefixed to the ops created by this
      function. If not supplied, the default name 'minimize' is used.

  Returns:
    optimizer_results: A namedtuple containing the following items:
      converged: Scalar boolean tensor indicating whether the minimum was
        found within tolerance.
      failed:  Scalar boolean tensor indicating whether a line search
        step failed to find a suitable step size satisfying Wolfe
        conditions. In the absence of any constraints on the
        number of objective evaluations permitted, this value will
        be the complement of `converged`. However, if there is
        a constraint and the search stopped due to available
        evaluations being exhausted, both `failed` and `converged`
        will be simultaneously False.
      num_objective_evaluations: The total number of objective
        evaluations performed.
      position: A tensor containing the last argument value found
        during the search. If the search converged, then
        this value is the argmin of the objective function.
      objective_value: A tensor containing the value of the objective
        function at the `position`. If the search converged, then this is
        the (local) minimum of the objective function.
      objective_gradient: A tensor containing the gradient of the objective
        function at the `position`. If the search converged the
        max-norm of this tensor should be below the tolerance.
      inverse_hessian_estimate: A tensor containing the inverse of the
        estimated Hessian.
  """
    with tf.name_scope(
            name, 'minimize',
            [initial_position, tolerance, initial_inverse_hessian_estimate]):
        initial_position = tf.convert_to_tensor(initial_position,
                                                name='initial_position')
        dtype = initial_position.dtype.base_dtype
        tolerance = tf.convert_to_tensor(tolerance,
                                         dtype=dtype,
                                         name='grad_tolerance')
        f_relative_tolerance = tf.convert_to_tensor(
            f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')
        x_tolerance = tf.convert_to_tensor(x_tolerance,
                                           dtype=dtype,
                                           name='x_tolerance')
        max_iterations = tf.convert_to_tensor(max_iterations,
                                              name='max_iterations')

        if initial_inverse_hessian_estimate is None:
            # Control inputs are an optional list of tensors to evaluate before
            # the start of the search procedure. These can be used to assert the
            # validity of inputs to the search procedure.
            control_inputs = None
            domain_shape = distribution_util.prefer_static_shape(
                initial_position)
            inv_hessian_shape = tf.concat([domain_shape, domain_shape], 0)
            initial_inv_hessian = tf.eye(tf.size(initial_position),
                                         dtype=dtype)
            initial_inv_hessian = tf.reshape(initial_inv_hessian,
                                             inv_hessian_shape,
                                             name='initial_inv_hessian')
        else:
            # If an initial inverse Hessian is supplied, these control inputs ensure
            # that it is positive definite and symmetric.
            initial_inv_hessian = tf.convert_to_tensor(
                initial_inverse_hessian_estimate,
                dtype=dtype,
                name='initial_inv_hessian')
            control_inputs = _inv_hessian_control_inputs(
                initial_inv_hessian, initial_position)

        # The `state` here is a `BfgsOptimizerResults` tuple with values for the
        # current state of the algorithm computation.
        def _cond(state):
            """Stopping condition for the algorithm."""
            keep_going = tf.logical_not(state.converged | state.failed | (
                state.num_iterations >= max_iterations))
            return keep_going

        def _body(state):
            """Main optimization loop."""

            search_direction = _get_search_direction(
                state.inverse_hessian_estimate, state.objective_gradient)
            derivative_at_start_pt = tf.reduce_sum(state.objective_gradient *
                                                   search_direction)
            # If the derivative at the start point is not negative, reset the
            # Hessian estimate and recompute the search direction.
            needs_reset = derivative_at_start_pt >= 0

            def _reset_search_dirn():
                search_direction = _get_search_direction(
                    initial_inv_hessian, state.objective_gradient)
                return search_direction, initial_inv_hessian

            search_direction, inv_hessian_estimate = (
                tf.contrib.framework.smart_cond(
                    needs_reset,
                    true_fn=_reset_search_dirn,
                    false_fn=lambda: (search_direction,
                                      state.inverse_hessian_estimate)))

            # Replace the hessian estimate in the state, in case it had to be reset.
            current_state = bfgs_utils.update_fields(
                state, inverse_hessian_estimate=inv_hessian_estimate)

            next_state = bfgs_utils.line_search_step(
                current_state, value_and_gradients_function, search_direction,
                tolerance, f_relative_tolerance, x_tolerance)

            # If not failed or converged, update the Hessian.
            state_after_inv_hessian_update = tf.contrib.framework.smart_cond(
                next_state.converged | next_state.failed, lambda: next_state,
                lambda: _update_inv_hessian(current_state, next_state))
            return [state_after_inv_hessian_update]

        kwargs = bfgs_utils.get_initial_state_args(
            value_and_gradients_function, initial_position, tolerance,
            control_inputs)
        kwargs['inverse_hessian_estimate'] = initial_inv_hessian
        initial_state = BfgsOptimizerResults(**kwargs)
        return tf.while_loop(_cond,
                             _body, [initial_state],
                             parallel_iterations=parallel_iterations)[0]
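Both listings validate a user-supplied estimate through `_inv_hessian_control_inputs`, whose body is not included on this page (example #3's variant also receives `initial_position`, presumably for a shape-compatibility check that is omitted here). Going only by the in-code comments ("positive definite and symmetric"), a hypothetical reconstruction of such a check might look as follows; the real helper may differ:

```python
import tensorflow as tf

def _inv_hessian_control_inputs(inv_hessian):
    """Assertions that a supplied inverse Hessian is symmetric and positive
    definite. A hypothetical sketch based on the comments above, not the
    project's actual code."""
    # Symmetric: the matrix equals its transpose on the last two axes.
    is_symmetric = tf.reduce_all(
        tf.equal(inv_hessian, tf.linalg.matrix_transpose(inv_hessian)))
    # Positive definite: every eigenvalue of the symmetric matrix is > 0.
    is_positive_definite = tf.reduce_all(
        tf.linalg.eigvalsh(inv_hessian) > 0.)
    return [
        tf.debugging.Assert(
            is_symmetric, ['Initial inverse Hessian is not symmetric.']),
        tf.debugging.Assert(
            is_positive_definite,
            ['Initial inverse Hessian is not positive definite.']),
    ]
```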