Example #1
0
 def create_dummy_expression(extra_constraints=None):
     """Returns an empty `Expression`, optionally carrying extra constraints.

     Args:
       extra_constraints: optional collection of constraints to attach to the
         otherwise-empty `Expression`.

     Returns:
       An `Expression` whose penalty and constraint portions are both empty
       `BasicExpression`s.
     """
     # Two separate empty BasicExpressions, matching the penalty/constraint
     # pair expected by ExplicitExpression.
     empty_penalty = basic_expression.BasicExpression([])
     empty_constraint = basic_expression.BasicExpression([])
     explicit = expression.ExplicitExpression(empty_penalty, empty_constraint)
     return expression.ConstrainedExpression(
         explicit, extra_constraints=extra_constraints)
def upper_bound(expressions):
    """Creates an `Expression` upper bounding the given expressions.

    A new slack variable is introduced, together with one constraint per
    element of `expressions` forcing the slack variable to be at least as
    large as that element. The returned `Expression` represents the slack
    variable itself.

    If you're going to be upper-bounding or minimizing the result, you can
    think of it as the `max` of its arguments. You should *never* lower-bound
    or maximize the result: doing so would only inflate the slack variable
    without affecting the given expressions, so the returned object is wrapped
    to raise an error on such uses.

    Args:
      expressions: list of `Expression`s, the quantities to upper-bound.

    Returns:
      An `Expression` representing an upper bound on the given expressions.

    Raises:
      ValueError: if the expressions list is empty.
      TypeError: if the expressions list contains a non-`Expression`.
    """
    if not expressions:
        raise ValueError(
            "upper_bound cannot be given an empty expression list")
    for element in expressions:
        if not isinstance(element, expression.Expression):
            raise TypeError(
                "upper_bound expects a list of rate Expressions (perhaps you need to "
                "call wrap_rate() to create an Expression from a Tensor?)")

    # Ideally the slack variable would share the predictions' dtype, but that
    # dtype may be unknown here (e.g. in eager mode), so we always create a
    # float32 variable with auto_cast=True.
    slack = deferred_tensor.DeferredVariable(0.0,
                                             trainable=True,
                                             name="tfco_upper_bound",
                                             dtype=tf.float32,
                                             auto_cast=True)

    slack_basic = basic_expression.BasicExpression([term.TensorTerm(slack)])
    slack_expression = expression.ExplicitExpression(
        penalty_expression=slack_basic,
        constraint_expression=slack_basic)
    slack_constraints = [ee <= slack_expression for ee in expressions]

    # Wrapping the result in a BoundedExpression means that any attempt to
    # maximize or lower-bound it will raise an error instead of silently
    # producing a meaningless value.
    return expression.BoundedExpression(
        lower_bound=expression.InvalidExpression(
            "the result of a call to upper_bound() can only be minimized or "
            "upper-bounded; it *cannot* be maximized or lower-bounded"),
        upper_bound=expression.ConstrainedExpression(
            expression.ExplicitExpression(
                penalty_expression=slack_basic,
                constraint_expression=slack_basic),
            extra_constraints=slack_constraints))
Example #3
0
    def test_minimization_problem_construction(self):
        """Checks that `RateMinimizationProblem`s are constructed correctly.

        Builds four positive-prediction-rate expressions, nests them inside
        each other via extra_constraints, then verifies that (1) the shared
        constraint is deduplicated (three constraints total, not four),
        (2) the evaluated objective/constraints match NumPy reference values,
        and (3) re-initializing the problem's variables restores the
        pre-update_ops outputs.
        """
        denominator_lower_bound = 0.0

        # NumPy reference values: the positive prediction rate of each dataset
        # under the two default losses (hinge for penalties, zero-one for
        # constraints).
        penalty_positive_prediction_rates = []
        constraint_positive_prediction_rates = []
        # NOTE(review): `xrange` presumably comes from six.moves (its import
        # is not visible in this chunk) -- confirm.
        for index in xrange(self._num_datasets):
            predictions = self._predictions[index]
            weights = self._weights[index]
            # For the penalties, the default loss is hinge. These will be used for the
            # "objective" and "proxy_constraints" in the resulting
            # RateMinimizationProblem.
            penalty_positive_prediction_rates.append(
                np.sum(weights * np.maximum(0.0, 1.0 + predictions)) /
                np.sum(weights))
            # For the constraints, the default loss is zero-one. These will be used
            # for the "constraints" in the resulting RateMinimizationProblem.
            constraint_positive_prediction_rates.append(
                np.sum(weights * 0.5 * (1.0 + np.sign(predictions))) /
                np.sum(weights))

        contexts = self._contexts

        # Construct the objective and three constraints. These are as simple as
        # possible, since rates and constraints are tested elsewhere.
        objective = binary_rates.positive_prediction_rate(contexts[0])
        constraint1 = binary_rates.positive_prediction_rate(contexts[1])
        constraint2 = binary_rates.positive_prediction_rate(contexts[2])
        constraint3 = binary_rates.positive_prediction_rate(contexts[3])

        # Make the objective and constraints include each other as:
        #   objective contains constraint2, which contains constraint3
        #   constraint1 contains constraint3
        #   constraint2 contains constraint3
        # Notice that constraint3 is contained in both constraint1 and constraint2,
        # and indirectly in the objective. Despite this, we only want it to occur
        # once in the resulting optimization problem.
        constraint3 = constraint3 <= 0
        constraint1 = (expression.ConstrainedExpression(
            constraint1, extra_constraints=[constraint3]) <= 0)
        constraint2 = (expression.ConstrainedExpression(
            constraint2, extra_constraints=[constraint3]) <= 0)
        objective = expression.ConstrainedExpression(
            objective, extra_constraints=[constraint2])

        # Only constraint1 is passed explicitly; constraint2 and constraint3
        # must be pulled in transitively through extra_constraints.
        problem = rate_minimization_problem.RateMinimizationProblem(
            objective, [constraint1],
            denominator_lower_bound=denominator_lower_bound)

        with self.wrapped_session() as session:
            # In eager mode, tf.Variable.initial_value doesn't work, so we need to
            # cache the initial values for later.
            if tf.executing_eagerly():
                variables_and_initial_values = [
                    (variable, variable.numpy())
                    for variable in problem.variables
                ]

            # Evaluate everything once before the update ops run, so we can
            # later check that re-initialization restores these values.
            initial_objective = session.run(problem.objective())
            initial_constraints = session.run(problem.constraints())
            initial_proxy_constraints = session.run(
                problem.proxy_constraints())

            # We only need to run the update ops once, since the entire dataset is
            # contained within the Tensors, so the denominators will be correct.
            session.run_ops(problem.update_ops)

            actual_objective = session.run(problem.objective())
            actual_constraints = session.run(problem.constraints())
            actual_proxy_constraints = session.run(problem.proxy_constraints())

            # If we update the internal state, and then re-initialize, then the
            # resulting objective, constraints and proxy constraints should be the
            # same as they were before running the update_ops.
            if tf.executing_eagerly():
                # Restore the cached initial values directly.
                for variable, initial_value in variables_and_initial_values:
                    variable.assign(initial_value)
            else:
                # In graph mode, re-run the variable initializer instead.
                session.run_ops(lambda: tf.compat.v1.variables_initializer(
                    problem.variables))

            reinitialized_objective = session.run(problem.objective())
            reinitialized_constraints = session.run(problem.constraints())
            reinitialized_proxy_constraints = session.run(
                problem.proxy_constraints())

        # Three constraints, not four: the shared constraint3 must appear only
        # once in the constructed problem.
        self.assertEqual(3, initial_constraints.size)
        self.assertEqual(3, initial_proxy_constraints.size)
        self.assertEqual(3, actual_constraints.size)
        self.assertEqual(3, actual_proxy_constraints.size)
        self.assertEqual(3, reinitialized_constraints.size)
        self.assertEqual(3, reinitialized_proxy_constraints.size)

        # The first context is used for the objective.
        self.assertAllClose(penalty_positive_prediction_rates[0],
                            actual_objective,
                            rtol=0,
                            atol=1e-6)
        # The last three contexts are used for the constraints. They'll occur in no
        # particular order, so we sort before comparing.
        self.assertAllClose(sorted(constraint_positive_prediction_rates[1:]),
                            sorted(actual_constraints),
                            rtol=0,
                            atol=1e-6)
        self.assertAllClose(sorted(penalty_positive_prediction_rates[1:]),
                            sorted(actual_proxy_constraints),
                            rtol=0,
                            atol=1e-6)

        # Make sure that, after re-initialization, we get the same results as we did
        # before executing the update_ops.
        self.assertAllClose(initial_objective,
                            reinitialized_objective,
                            rtol=0,
                            atol=1e-6)
        self.assertAllClose(initial_constraints,
                            reinitialized_constraints,
                            rtol=0,
                            atol=1e-6)
        self.assertAllClose(initial_proxy_constraints,
                            reinitialized_proxy_constraints,
                            rtol=0,
                            atol=1e-6)