Example #1
  def test_predicate(self):
    """Tests the `Predicate` class."""
    structure_memoizer = {
        defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,
        defaults.GLOBAL_STEP_KEY: tf.Variable(0, dtype=tf.int32),
        defaults.VARIABLE_FN_KEY: tf.Variable
    }

    predicate1 = predicate.Predicate([-0.2, 0.4, 1.0, 0.3])
    predicate2 = predicate.Predicate([0.8, 1.1, 0.6, 0.0])

    # We'll calculate the XOR of predicate1 and predicate2 in three ways. This
    # is the expected result.
    expected = [0.8, 0.6, 0.4, 0.3]

    actual1 = predicate1 ^ predicate2
    actual2 = (predicate1 & ~predicate2) | (~predicate1 & predicate2)
    actual3 = (predicate1 | predicate2) & ~(predicate1 & predicate2)

    with self.wrapped_session() as session:
      self.assertAllClose(
          expected,
          session.run(actual1.tensor(structure_memoizer)),
          rtol=0,
          atol=1e-6)
      self.assertAllClose(
          expected,
          session.run(actual2.tensor(structure_memoizer)),
          rtol=0,
          atol=1e-6)
      self.assertAllClose(
          expected,
          session.run(actual3.tensor(structure_memoizer)),
          rtol=0,
          atol=1e-6)
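The expected values follow from Gödel-style fuzzy logic. A minimal NumPy sketch of that arithmetic (an assumption inferred from the expected values, not taken from the Predicate implementation itself): inputs are clipped to [0, 1], & is elementwise min, | is max, and ~ is one-minus.

import numpy as np

p1 = np.clip([-0.2, 0.4, 1.0, 0.3], 0.0, 1.0)
p2 = np.clip([0.8, 1.1, 0.6, 0.0], 0.0, 1.0)

# XOR via (p1 & ~p2) | (~p1 & p2), with & = min, | = max, ~ = 1 - p.
xor = np.maximum(np.minimum(p1, 1.0 - p2), np.minimum(1.0 - p1, p2))
print(xor)  # [0.8 0.6 0.4 0.3], matching `expected` in the test above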
Example #2
    def test_ratio_weights_memoizer(self):
        """Tests memoization."""
        memoizer = {
            defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,
            defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)
        }

        weights_tensor = deferred_tensor.DeferredTensor(
            tf.constant([0.5, 0.1, 1.0], dtype=tf.float32))
        numerator1_tensor = deferred_tensor.DeferredTensor(
            tf.constant([True, False, True], dtype=tf.bool))
        numerator2_tensor = deferred_tensor.DeferredTensor(
            tf.constant([True, True, False], dtype=tf.bool))
        numerator1_predicate = predicate.Predicate(numerator1_tensor)
        numerator2_predicate = predicate.Predicate(numerator2_tensor)
        denominator_predicate = predicate.Predicate(True)

        ratio_weights1 = term._RatioWeights.ratio(weights_tensor,
                                                  numerator1_predicate,
                                                  denominator_predicate)
        ratio_weights2 = term._RatioWeights.ratio(weights_tensor,
                                                  numerator2_predicate,
                                                  denominator_predicate)
        result1, variables1 = ratio_weights1.evaluate(memoizer)
        result2, variables2 = ratio_weights2.evaluate(memoizer)

        # The numerators differ, so the results should be different, but the
        # weights and denominators match, so the variables should be the same.
        self.assertIsNot(result1, result2)
        self.assertEqual(variables1, variables2)
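The sharing asserted above can be pictured as a cache keyed on the (weights, denominator) pair. A toy sketch of that pattern (the `_cache` dict and `shared_denominator_state` function are hypothetical; the real `_RatioWeights` internals differ):

_cache = {}

def shared_denominator_state(weights_key, denominator_key):
    key = (weights_key, denominator_key)
    if key not in _cache:
        _cache[key] = []  # stands in for the running count/sum variables
    return _cache[key]

# Different numerators, but the same weights and denominator: one shared
# state, which is why variables1 == variables2 in the test above.
assert shared_denominator_state("w", "den") is shared_denominator_state("w", "den")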
Example #3
def create_binary_classification_term(predictions_tensor,
                                      positive_weights_tensor,
                                      negative_weights_tensor):
    positive_ratio_weights = term._RatioWeights.ratio(
        deferred_tensor.DeferredTensor(positive_weights_tensor),
        predicate.Predicate(True), predicate.Predicate(True))
    negative_ratio_weights = term._RatioWeights.ratio(
        deferred_tensor.DeferredTensor(negative_weights_tensor),
        predicate.Predicate(True), predicate.Predicate(True))
    return term.BinaryClassificationTerm(
        deferred_tensor.DeferredTensor(predictions_tensor),
        positive_ratio_weights, negative_ratio_weights,
        loss.HingeLoss())
def _rate_context_helper(predictions, labels, weights, num_classes):
    """Helper for rate_context() and multiclass_rate_context()."""

    # Ideally, we'd check that these objects are Tensors, or are types that can be
    # converted to Tensors. Unfortunately, this includes a lot of possible types,
    # so the easiest solution would be to actually perform the conversion, and
    # then check that the resulting Tensor has only one element. This, however,
    # would add a dummy element to the TensorFlow graph, and wouldn't work for a
    # Tensor with an unknown size. Hence, we only check that they are not types
    # that we know for certain are disallowed: objects internal to this library.
    if isinstance(predictions, helpers.RateObject):
        raise TypeError(
            "predictions parameter to rate_context() should be a "
            "Tensor-like object, or a nullary function returning such")
    if isinstance(labels, helpers.RateObject):
        raise TypeError(
            "labels parameter to rate_context() should be a "
            "Tensor-like object, or a nullary function returning such")
    if isinstance(weights, helpers.RateObject):
        raise TypeError(
            "weights parameter to rate_context() should be a "
            "Tensor-like object, or a nullary function returning such")

    if tf.executing_eagerly():
        if not callable(predictions):
            raise TypeError(
                "in eager mode, the predictions provided to a context "
                "must be a nullary function returning a Tensor (to fix "
                "this, consider wrapping it in a lambda)")
        # Unlike the predictions, which *must* be callable, we allow non-Tensor
        # constants (e.g. python scalars or numpy arrays) for the labels and
        # weights. However, they cannot be ordinary Tensors.
        if tf.is_tensor(labels):
            raise TypeError(
                "in eager mode, the labels provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")
        if tf.is_tensor(weights):
            raise TypeError(
                "in eager mode, the weights provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")

    predictions = deferred_tensor.ExplicitDeferredTensor(predictions)
    if labels is not None:
        labels = deferred_tensor.ExplicitDeferredTensor(labels)
    weights = deferred_tensor.ExplicitDeferredTensor(weights)

    raw_context = _RawContext(penalty_predictions=predictions,
                              penalty_labels=labels,
                              penalty_weights=weights,
                              constraint_predictions=predictions,
                              constraint_labels=labels,
                              constraint_weights=weights,
                              num_classes=num_classes)
    true_predicate = predicate.Predicate(True)
    return SubsettableContext(raw_context, true_predicate, true_predicate)
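A hypothetical illustration of the eager-mode checks above, through a public rate_context()-style wrapper (`model` and `features` are assumptions for illustration):

predictions_tensor = model(features)
# Rejected by the callable check above: a plain Tensor would freeze the
# model's current output.
# context = rate_context(predictions_tensor)

# Accepted: a nullary callable is re-evaluated on every use. Labels and
# weights may be plain constants (e.g. numpy arrays), just not Tensors.
context = rate_context(lambda: model(features),
                       labels=np.array([1.0, -1.0, 1.0]),
                       weights=1.0)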
Example #5
def create_multiclass_term(predictions_tensor, positive_weights_tensor,
                           negative_weights_tensor):
    key = (1.0, predicate.Predicate(True))
    value = deferred_tensor.ExplicitDeferredTensor(
        tf.stack([positive_weights_tensor, negative_weights_tensor],
                  axis=1))
    ratio_weights = term._RatioWeights({key: value}, 2)
    return term.MulticlassTerm(
        deferred_tensor.ExplicitDeferredTensor(predictions_tensor),
        ratio_weights, loss.HingeLoss())
Example #6
    def test_not_merging(self):
        """Checks that `BasicExpression`s don't merge incompatible `Term`s."""
        predictions = deferred_tensor.ExplicitDeferredTensor(
            tf.constant([1.0, -1.0, 0.5], dtype=tf.float32))
        weights1 = deferred_tensor.ExplicitDeferredTensor(1.0)
        weights2 = deferred_tensor.ExplicitDeferredTensor(
            tf.constant([0.7, 0.3, 1.0], dtype=tf.float32))
        numerator_predicate1 = predicate.Predicate(True)
        numerator_predicate2 = predicate.Predicate(
            tf.constant([True, False, False]))
        denominator_predicate1 = predicate.Predicate(True)
        denominator_predicate2 = predicate.Predicate(
            tf.constant([True, False, True]))
        # The two terms have different losses, so they're incompatible.
        term_object1 = term.BinaryClassificationTerm.ratio(
            1.0, 0.0, predictions, weights1, numerator_predicate1,
            denominator_predicate1, loss.ZeroOneLoss())
        term_object2 = term.BinaryClassificationTerm.ratio(
            1.0, 0.0, predictions, weights2, numerator_predicate2,
            denominator_predicate2, loss.HingeLoss())
        self.assertNotEqual(term_object1.key, term_object2.key)

        expression_object1 = basic_expression.BasicExpression([term_object1])
        self.assertEqual(1, len(expression_object1._terms))
        expression_object2 = basic_expression.BasicExpression([term_object2])
        self.assertEqual(1, len(expression_object2._terms))

        # Check that __init__ doesn't merge incompatible terms.
        expression_object = basic_expression.BasicExpression(
            [term_object1, term_object2])
        self.assertEqual(2, len(expression_object._terms))
        # Check that __add__ doesn't merge incompatible terms.
        expression_object = expression_object1 + expression_object2
        self.assertEqual(2, len(expression_object._terms))
        # Check that __sub__ doesn't merge incompatible terms.
        expression_object = expression_object1 - expression_object2
        self.assertEqual(2, len(expression_object._terms))
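For contrast, a short sketch of the compatible case, reusing the names from the test above (the merge behavior is inferred from the test's comments): two terms that differ only in their coefficients share a key, so BasicExpression should merge them.

term_object3 = term.BinaryClassificationTerm.ratio(
    0.0, 1.0, predictions, weights1, numerator_predicate1,
    denominator_predicate1, loss.ZeroOneLoss())
# Same predictions, weights, predicates and loss as term_object1, so the
# keys match, and the two terms merge into one.
assert term_object1.key == term_object3.key
merged = basic_expression.BasicExpression([term_object1, term_object3])
assert len(merged._terms) == 1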
def _split_rate_context_helper(penalty_predictions, constraint_predictions,
                               penalty_labels, constraint_labels,
                               penalty_weights, constraint_weights,
                               num_classes):
    """Helper for split_rate_context() and multiclass_split_rate_context()."""

    # See comment in _rate_context_helper.
    if isinstance(penalty_predictions, helpers.RateObject):
        raise TypeError(
            "penalty_predictions parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function "
            "returning such")
    if isinstance(constraint_predictions, helpers.RateObject):
        raise TypeError(
            "constraint_predictions parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function "
            "returning such")
    if isinstance(penalty_labels, helpers.RateObject):
        raise TypeError(
            "penalty_labels parameter to split_rate_context() should "
            "be a Tensor-like object, or a nullary function returning "
            "such")
    if isinstance(constraint_labels, helpers.RateObject):
        raise TypeError(
            "constraint_labels parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function "
            "returning such")
    if isinstance(penalty_weights, helpers.RateObject):
        raise TypeError(
            "penalty_weights parameter to split_rate_context() should "
            "be a Tensor-like object, or a nullary function returning "
            "such")
    if isinstance(constraint_weights, helpers.RateObject):
        raise TypeError(
            "constraint_weights parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function "
            "returning such")

    if tf.executing_eagerly():
        if not (callable(penalty_predictions)
                and callable(constraint_predictions)):
            raise TypeError(
                "in eager mode, the predictions provided to a context "
                "must be a nullary function returning a Tensor (to fix "
                "this, consider wrapping it in a lambda)")
        # Unlike the predictions, which *must* be callable, we allow non-Tensor
        # constants (e.g. python scalars or numpy arrays) for the labels and
        # weights. However, they cannot be ordinary Tensors.
        if tf.is_tensor(penalty_labels) or tf.is_tensor(constraint_labels):
            raise TypeError(
                "in eager mode, the labels provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")
        if tf.is_tensor(penalty_weights) or tf.is_tensor(constraint_weights):
            raise TypeError(
                "in eager mode, the weights provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")

    penalty_predictions = deferred_tensor.ExplicitDeferredTensor(
        penalty_predictions)
    if penalty_labels is not None:
        penalty_labels = deferred_tensor.ExplicitDeferredTensor(penalty_labels)
    penalty_weights = deferred_tensor.ExplicitDeferredTensor(penalty_weights)

    constraint_predictions = deferred_tensor.ExplicitDeferredTensor(
        constraint_predictions)
    if constraint_labels is not None:
        constraint_labels = deferred_tensor.ExplicitDeferredTensor(
            constraint_labels)
    constraint_weights = deferred_tensor.ExplicitDeferredTensor(
        constraint_weights)

    raw_context = _RawContext(penalty_predictions=penalty_predictions,
                              penalty_labels=penalty_labels,
                              penalty_weights=penalty_weights,
                              constraint_predictions=constraint_predictions,
                              constraint_labels=constraint_labels,
                              constraint_weights=constraint_weights,
                              num_classes=num_classes)
    true_predicate = predicate.Predicate(True)
    return SubsettableContext(raw_context, true_predicate, true_predicate)
    def subset(self, penalty_predicate, constraint_predicate=None):
        """Returns a subset of this context.

    The two predicates should be boolean `Tensor`s of the same size as the
    predictions `Tensor` from which the top-level context was constructed. If an
    element of the predicate `Tensor` is True, and the corresponding example is
    included in this context, then the example will be included in the resulting
    context. Otherwise, it will not.

    A "split context" contains two sets of predictions (and optionally labels
    and weights). When subsetting a split context, two predicates must be
    provided to this method: the first for the penalty portion, and the second
    for the constraint portion. Alternatively, if you want to create a split
    context from a non-split one, then you can do so by providing both predicate
    arguments explicitly.

    This method is here for convenience, but it comes at a cost. You should use
    subsetting *with great caution*. If, for example, you wish to create a rate
    only on the set of "blue" examples, then it will almost always be better
    (but more complicated) to create an entirely separate dataset containing
    only "blue" examples (e.g. using the "filter" method of a
    `tf.data.Dataset`), rather than taking the "blue" subset of a dataset that
    also contains "red" and "green" examples.

    The reason for this is that, if using subsetting, each minibatch will
    contain varying numbers of "blue" examples during training. As a
    consequence, we'll sometimes perform too-small updates, and sometimes
    overcorrect with extremely large updates. This problem is less serious if
    "blue" examples are common, but can be fatal if "blue" examples are
    extremely rare.

    If, instead of subsetting, we were to create an entirely separate "blue"
    dataset, then every minibatch would contain the same number of "blue"
    examples, and optimization would proceed more smoothly.

    Args:
      penalty_predicate: boolean `Tensor` with the same size as the underlying
        predictions `Tensor` (or broadcastable to it), each element of which
        indicates whether the corresponding example should be included in the
        subset.
      constraint_predicate: optional boolean `Tensor`, playing the same role as
        "penalty_predicate", but for the constraints portion of the context.

    Returns:
      `SubsettableContext` representing the subset of this context on which
      penalty_predicate (and constraint_predicate, if applicable) are True.

    Raises:
      ValueError: if no constraint_predicate is provided, but this is a split
        context.
    """
        if constraint_predicate is None:
            # It's fine if the labels and/or weights are different.
            if (self._raw_context.penalty_predictions !=
                    self._raw_context.constraint_predictions
                    or self._penalty_predicate != self._constraint_predicate):
                raise ValueError("constraint_predicate must be provided when "
                                 "subsetting a split context")
            constraint_predicate = penalty_predicate

        # In eager mode, we do not permit ordinary constant Tensors to be passed as
        # predicates: only lambdas. Additionally, we allow non-Tensor constants
        # (e.g. python scalars or numpy arrays) and DeferredTensors (for internal
        # use).
        #
        # The reason for this is that, in eager mode, we want to prevent users from
        # passing a constant when they intend to use a variable. For example:
        #   context.subset(some_variable > 0.5)
        # This is probably a bug, since if, later in the program, the value of
        # "some_variable" changes, the value of the earlier evaluation of
        # "some_variable > 0.5" will not. To prevent this, we have checks that force
        # the user to use something like:
        #   context.subset(lambda: some_variable > 0.5)
        # which will work fine even if "some_variable" subsequently changes.
        if tf.executing_eagerly() and (tf.is_tensor(penalty_predicate)
                                       or tf.is_tensor(constraint_predicate)):
            raise ValueError(
                "in eager mode, the predicate provided to a context's "
                "subset() method must either be a constant, or a "
                "nullary function returning a Tensor: it cannot be a"
                "plain Tensor (to fix this, consider wrapping it in a "
                "lambda)")

        # First convert the predicate Tensors into DeferredTensors, so that we can
        # use the __eq__ operator.
        if not isinstance(penalty_predicate, deferred_tensor.DeferredTensor):
            penalty_predicate = deferred_tensor.ExplicitDeferredTensor(
                penalty_predicate)
        if not isinstance(constraint_predicate,
                          deferred_tensor.DeferredTensor):
            constraint_predicate = deferred_tensor.ExplicitDeferredTensor(
                constraint_predicate)

        # Convert the boolean predicates to Predicate objects. Make sure that we
        # don't change from a non-split context (both predicates are the same
        # object) to a split context (the predicates are different objects) unless
        # it's necessary.
        if (self._penalty_predicate == self._constraint_predicate
                and penalty_predicate == constraint_predicate):
            penalty_predicate = self._penalty_predicate & predicate.Predicate(
                penalty_predicate)
            constraint_predicate = penalty_predicate
        else:
            penalty_predicate = self._penalty_predicate & predicate.Predicate(
                penalty_predicate)
            constraint_predicate = self._constraint_predicate & predicate.Predicate(
                constraint_predicate)

        return SubsettableContext(raw_context=self._raw_context,
                                  penalty_predicate=penalty_predicate,
                                  constraint_predicate=constraint_predicate)
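A short sketch of the eager-mode pattern that the checks above enforce (`context` and `labels` are assumed to exist):

# Re-evaluated on every use, so later changes to `labels` are picked up:
blue_context = context.subset(lambda: labels > 0.5)

# Rejected in eager mode, since the predicate would be frozen at its
# current value:
# blue_context = context.subset(labels > 0.5)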
Example #9
def create_ratio_weights(weights_tensor):
    return term._RatioWeights.ratio(
        deferred_tensor.DeferredTensor(weights_tensor),
        predicate.Predicate(True), predicate.Predicate(True))
Example #10
    def test_ratio_weights_ratio(self):
        """Tests `_RatioWeights`'s ratio() class method."""
        weights_placeholder = self.wrapped_placeholder(tf.float32,
                                                       shape=(None, ))
        numerator_predicate_placeholder = self.wrapped_placeholder(
            tf.bool, shape=(None, ))
        denominator_predicate_placeholder = self.wrapped_placeholder(
            tf.bool, shape=(None, ))
        memoizer = {
            defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,
            defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)
        }

        numerator_predicate = predicate.Predicate(
            numerator_predicate_placeholder)
        denominator_predicate = predicate.Predicate(
            denominator_predicate_placeholder)
        ratio_weights = term._RatioWeights.ratio(
            deferred_tensor.DeferredTensor(weights_placeholder),
            numerator_predicate, denominator_predicate)
        actual_weights, variables = ratio_weights.evaluate(memoizer)

        # We need to explicitly create the variables before creating the wrapped
        # session.
        for variable in variables:
            variable.create(memoizer)

        def update_ops_fn():
            update_ops = []
            for variable in variables:
                update_ops += variable.update_ops(memoizer)
            return update_ops

        with self.wrapped_session() as session:
            running_count = 0.0
            running_sum = 0.0
            for ii in range(len(self._splits) - 1):
                begin_index = self._splits[ii]
                end_index = self._splits[ii + 1]
                size = end_index - begin_index

                weights_subarray = self._weights[begin_index:end_index, 0]
                numerator_predicate_subarray = self._numerator_predicate[
                    begin_index:end_index]
                denominator_predicate_subarray = self._denominator_predicate[
                    begin_index:end_index]

                running_count += size
                running_sum += np.sum(weights_subarray *
                                      denominator_predicate_subarray)
                average_denominator = running_sum / running_count
                expected_weights = (weights_subarray *
                                    numerator_predicate_subarray)
                expected_weights /= average_denominator

                # Running the update_ops will update the running denominator count/sum
                # calculated by the _RatioWeights object.
                session.run_ops(update_ops_fn,
                                feed_dict={
                                    weights_placeholder:
                                    weights_subarray,
                                    numerator_predicate_placeholder:
                                    numerator_predicate_subarray,
                                    denominator_predicate_placeholder:
                                    denominator_predicate_subarray
                                })
                # Now we can calculate the weights.
                actual_weights_value = session.run(
                    lambda: actual_weights(memoizer),
                    feed_dict={
                        weights_placeholder:
                        weights_subarray,
                        numerator_predicate_placeholder:
                        numerator_predicate_subarray,
                        denominator_predicate_placeholder:
                        denominator_predicate_subarray
                    })

                self.assertAllClose(expected_weights,
                                    actual_weights_value,
                                    rtol=0,
                                    atol=1e-6)

                session.run_ops(
                    lambda: memoizer[defaults.GLOBAL_STEP_KEY].assign_add(1))
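The expected-value computation in the loop above reduces to a simple formula. A standalone NumPy sketch over a single batch (the values are arbitrary):

import numpy as np

weights = np.array([0.5, 0.1, 1.0])
numerator_mask = np.array([1.0, 0.0, 1.0])    # boolean predicate, as floats
denominator_mask = np.array([1.0, 1.0, 1.0])

# The denominator is the running mean of the weights restricted to the
# denominator subset (here computed over a single batch).
average_denominator = np.sum(weights * denominator_mask) / len(weights)
ratio_weights = weights * numerator_mask / average_denominator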
Example #11
    def test_arithmetic(self):
        """Tests `BasicExpression`'s arithmetic operators."""
        structure_memoizer = {
            defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,
            defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)
        }

        dummy_predictions = deferred_tensor.ExplicitDeferredTensor(
            tf.constant(0, dtype=tf.float32, shape=(1, )))
        dummy_weights = deferred_tensor.ExplicitDeferredTensor(1.0)
        true_predicate = predicate.Predicate(True)

        def ratio_expression(positive_coefficient, negative_coefficient,
                             loss_function):
            term_object = term.BinaryClassificationTerm.ratio(
                positive_coefficient, negative_coefficient, dummy_predictions,
                dummy_weights, true_predicate, true_predicate, loss_function)
            return basic_expression.BasicExpression([term_object])

        def constant_expression(constant):
            return basic_expression.BasicExpression(
                [term.TensorTerm(tf.constant(constant, dtype=tf.float32))])

        # This expression exercises all of the operators. The first and third
        # ratio_expression()s will have the same losses (and everything else except
        # the coefficients), and will therefore be compatible. The second has a
        # different loss, and will be incompatible with the other two.
        expression_object = (
            constant_expression(0.3) -
            (ratio_expression(1.0, 0.0, loss.ZeroOneLoss()) / 2.3 +
             0.7 * ratio_expression(0.5, 0.5, loss.HingeLoss())) +
            (constant_expression(1.2) + ratio_expression(
                0.0, 1.0, loss.ZeroOneLoss()) - constant_expression(0.1)) * 0.6
            + constant_expression(0.8))

        expected_constant = 0.3 + (1.2 - 0.1) * 0.6 + 0.8
        coefficients = np.array([-1.0 / 2.3, -0.7, 0.6], dtype=np.float32)
        positive_coefficients = np.array([1.0, 0.5, 0.0],
                                         dtype=np.float32) * coefficients
        negative_coefficients = np.array([0.0, 0.5, 1.0],
                                         dtype=np.float32) * coefficients
        # The expected weights for the two zero-one terms will be merged, since
        # they're compatible. There is only one hinge term.
        expected_zero_one_positive_weights = (positive_coefficients[0] +
                                              positive_coefficients[2])
        expected_zero_one_negative_weights = (negative_coefficients[0] +
                                              negative_coefficients[2])
        expected_hinge_positive_weights = positive_coefficients[1]
        expected_hinge_negative_weights = negative_coefficients[1]

        # We should have three terms, since the two compatible
        # BinaryClassificationTerms will be merged, and we'll have one TensorTerm.
        expression_terms = expression_object._terms
        expression_binary_classification_terms = [
            tt for tt in expression_terms
            if isinstance(tt, term.BinaryClassificationTerm)
        ]
        expression_tensor_terms = [
            tt for tt in expression_terms if isinstance(tt, term.TensorTerm)
        ]
        self.assertEqual(3, len(expression_terms))
        self.assertEqual(2, len(expression_binary_classification_terms))
        self.assertEqual(1, len(expression_tensor_terms))
        zero_one_term, hinge_term = expression_binary_classification_terms
        if zero_one_term.loss != loss.ZeroOneLoss():
            zero_one_term, hinge_term = hinge_term, zero_one_term
        self.assertEqual(zero_one_term.loss, loss.ZeroOneLoss())
        self.assertEqual(hinge_term.loss, loss.HingeLoss())

        actual_constant = expression_tensor_terms[0].evaluate(
            structure_memoizer)
        actual_zero_one_positive_weights = (
            zero_one_term.positive_ratio_weights.evaluate(structure_memoizer))
        actual_zero_one_negative_weights = (
            zero_one_term.negative_ratio_weights.evaluate(structure_memoizer))
        actual_hinge_positive_weights = (
            hinge_term.positive_ratio_weights.evaluate(structure_memoizer))
        actual_hinge_negative_weights = (
            hinge_term.negative_ratio_weights.evaluate(structure_memoizer))

        # We need to explicitly create the variables before creating the wrapped
        # session.
        variables = deferred_tensor.DeferredVariableList(
            actual_constant.variables +
            actual_zero_one_positive_weights.variables +
            actual_zero_one_negative_weights.variables +
            actual_hinge_positive_weights.variables +
            actual_hinge_negative_weights.variables)
        for variable in variables:
            variable.create(structure_memoizer)

        with self.wrapped_session() as session:
            self.assertAllClose(expected_constant,
                                session.run(
                                    actual_constant(structure_memoizer)),
                                rtol=0,
                                atol=1e-6)

            self.assertAllClose(
                np.array([expected_zero_one_positive_weights]),
                session.run(
                    actual_zero_one_positive_weights(structure_memoizer)),
                rtol=0,
                atol=1e-6)
            self.assertAllClose(
                np.array([expected_zero_one_negative_weights]),
                session.run(
                    actual_zero_one_negative_weights(structure_memoizer)),
                rtol=0,
                atol=1e-6)
            self.assertAllClose(
                np.array([expected_hinge_positive_weights]),
                session.run(actual_hinge_positive_weights(structure_memoizer)),
                rtol=0,
                atol=1e-6)
            self.assertAllClose(
                np.array([expected_hinge_negative_weights]),
                session.run(actual_hinge_negative_weights(structure_memoizer)),
                rtol=0,
                atol=1e-6)
def split_rate_context(penalty_predictions,
                       constraint_predictions,
                       penalty_labels=None,
                       constraint_labels=None,
                       penalty_weights=1.0,
                       constraint_weights=1.0):
    """Creates a new split context.

  A "split context", unlike a normal context, has separate predictions, labels,
  weights and subset for the "penalty" and "constraint" portions of the problem.
  This is an advanced option, and is not needed in most circumstances.

  Args:
    penalty_predictions: rank-1 floating-point `Tensor`, for which the ith
      element is the output of the model on the ith training example, for the
      training dataset associated with the penalties.
    constraint_predictions: rank-1 floating-point `Tensor`, for which the ith
      element is the output of the model on the ith training example, for the
      training dataset associated with the constraints.
    penalty_labels: optional rank-1 `Tensor`, for which the ith element is the
      label of the ith training example, for the training dataset associated
      with the penalties.
    constraint_labels: optional rank-1 `Tensor`, for which the ith element is
      the label of the ith training example, for the training dataset associated
      with the constraints.
    penalty_weights: optional rank-1 floating-point `Tensor`, for which the ith
      element is the weight of the ith training example, for the training
      dataset associated with the penalties. If not specified, the weights
      default to being all-one.
    constraint_weights: optional rank-1 floating-point `Tensor`, for which the
      ith element is the weight of the ith training example, for the training
      dataset associated with the constraints. If not specified, the weights
      default to being all-one.

  Returns:
    `SubsettableContext` representing the given predictions, labels and weights.

  Raises:
    ValueError: if we're in eager mode, but either penalty_predictions or
      constraint_predictions is not callable.
    TypeError: if any arguments are internal rate library objects, instead of
      `Tensor`s or scalars.
  """
    # See comment in rate_context.
    if isinstance(penalty_predictions, helpers.RateObject):
        raise TypeError(
            "penalty_predictions parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function returning such"
        )
    if isinstance(constraint_predictions, helpers.RateObject):
        raise TypeError(
            "constraint_predictions parameter to "
            "split_rate_context() should be a Tensor-like object, or a nullary "
            "function returning such")
    if isinstance(penalty_labels, helpers.RateObject):
        raise TypeError(
            "penalty_labels parameter to split_rate_context() should "
            "be a Tensor-like object, or a nullary function returning such")
    if isinstance(constraint_labels, helpers.RateObject):
        raise TypeError(
            "constraint_labels parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function returning such"
        )
    if isinstance(penalty_weights, helpers.RateObject):
        raise TypeError(
            "penalty_weights parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function returning such"
        )
    if isinstance(constraint_weights, helpers.RateObject):
        raise TypeError(
            "constraint_weights parameter to split_rate_context() "
            "should be a Tensor-like object, or a nullary function returning such"
        )

    if tf.executing_eagerly():
        if not (callable(penalty_predictions)
                and callable(constraint_predictions)):
            raise ValueError(
                "in eager mode, the predictions provided to a context "
                "must be a nullary function returning a Tensor (to fix "
                "this, consider wrapping it in a lambda)")
        # Unlike the predictions, which *must* be callable, we allow non-Tensor
        # constants (e.g. python scalars or numpy arrays) for the labels and
        # weights. However, they cannot be ordinary Tensors.
        if tf.is_tensor(penalty_labels) or tf.is_tensor(constraint_labels):
            raise ValueError(
                "in eager mode, the labels provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")
        if tf.is_tensor(penalty_weights) or tf.is_tensor(constraint_weights):
            raise ValueError(
                "in eager mode, the weights provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")

    penalty_predictions = deferred_tensor.DeferredTensor(penalty_predictions)
    if penalty_labels is not None:
        penalty_labels = deferred_tensor.DeferredTensor(penalty_labels)
    penalty_weights = deferred_tensor.DeferredTensor(penalty_weights)

    constraint_predictions = deferred_tensor.DeferredTensor(
        constraint_predictions)
    if constraint_labels is not None:
        constraint_labels = deferred_tensor.DeferredTensor(constraint_labels)
    constraint_weights = deferred_tensor.DeferredTensor(constraint_weights)

    raw_context = _RawContext(penalty_predictions=penalty_predictions,
                              penalty_labels=penalty_labels,
                              penalty_weights=penalty_weights,
                              constraint_predictions=constraint_predictions,
                              constraint_labels=constraint_labels,
                              constraint_weights=constraint_weights)
    true_predicate = predicate.Predicate(True)
    return SubsettableContext(raw_context, true_predicate, true_predicate)
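A hypothetical eager-mode call (`penalty_model`, `constraint_model`, `features` and the label arrays are assumptions for illustration):

context = split_rate_context(
    penalty_predictions=lambda: penalty_model(features),
    constraint_predictions=lambda: constraint_model(features),
    penalty_labels=np.array([1.0, -1.0]),
    constraint_labels=np.array([1.0, 1.0]))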
def rate_context(predictions, labels=None, weights=1.0):
    """Creates a new context.

  Args:
    predictions: rank-1 floating-point `Tensor`, for which the ith element is
      the output of the model on the ith training example.
    labels: optional rank-1 `Tensor`, for which the ith element is the label of
      the ith training example.
    weights: optional rank-1 floating-point `Tensor`, for which the ith element
      is the weight of the ith training example. If not specified, the weights
      default to being all-one.

  Returns:
    `SubsettableContext` representing the given predictions, labels and weights.

  Raises:
    ValueError: if we're in eager mode, but predictions is not callable.
    TypeError: if any arguments are internal rate library objects, instead of
      `Tensor`s or scalars.
  """
    # Ideally, we'd check that these objects are Tensors, or are types that can be
    # converted to Tensors. Unfortunately, this includes a lot of possible types,
    # so the easiest solution would be to actually perform the conversion, and
    # then check that the resulting Tensor has only one element. This, however,
    # would add a dummy element to the TensorFlow graph, and wouldn't work for a
    # Tensor with an unknown size. Hence, we only check that they are not types
    # that we know for certain are disallowed: objects internal to this library.
    if isinstance(predictions, helpers.RateObject):
        raise TypeError(
            "predictions parameter to rate_context() should be a "
            "Tensor-like object, or a nullary function returning such")
    if isinstance(labels, helpers.RateObject):
        raise TypeError(
            "labels parameter to rate_context() should be a "
            "Tensor-like object, or a nullary function returning such")
    if isinstance(weights, helpers.RateObject):
        raise TypeError(
            "weights parameter to rate_context() should be a "
            "Tensor-like object, or a nullary function returning such")

    if tf.executing_eagerly():
        if not callable(predictions):
            raise ValueError(
                "in eager mode, the predictions provided to a context "
                "must be a nullary function returning a Tensor (to fix "
                "this, consider wrapping it in a lambda)")
        # Unlike the predictions, which *must* be callable, we allow non-Tensor
        # constants (e.g. python scalars or numpy arrays) for the labels and
        # weights. However, they cannot be ordinary Tensors.
        if tf.is_tensor(labels):
            raise ValueError(
                "in eager mode, the labels provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")
        if tf.is_tensor(weights):
            raise ValueError(
                "in eager mode, the weights provided to a context must "
                "either be a constant, or a nullary function returning "
                "a Tensor: it cannot be a plain Tensor (to fix this, "
                "consider wrapping it in a lambda)")

    predictions = deferred_tensor.DeferredTensor(predictions)
    if labels is not None:
        labels = deferred_tensor.DeferredTensor(labels)
    weights = deferred_tensor.DeferredTensor(weights)

    raw_context = _RawContext(penalty_predictions=predictions,
                              penalty_labels=labels,
                              penalty_weights=weights,
                              constraint_predictions=predictions,
                              constraint_labels=labels,
                              constraint_weights=weights)
    true_predicate = predicate.Predicate(True)
    return SubsettableContext(raw_context, true_predicate, true_predicate)
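Finally, a minimal sketch of constructing a context in both execution modes (`model_fn` and `labels_array` are assumptions for illustration):

if tf.executing_eagerly():
    # Predictions must be a nullary callable in eager mode.
    context = rate_context(lambda: model_fn(), labels=labels_array)
else:
    # In graph mode, a plain Tensor is accepted directly.
    context = rate_context(model_fn(), labels=labels_array)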