Example #1
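These snippets are test cases for SequenceLoss, shown without their module preamble. A minimal set of imports they appear to assume, where loss is presumed to be the tensorflow_addons.seq2seq.loss module:

import numpy as np
import pytest
import tensorflow as tf

from tensorflow_addons.seq2seq import loss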
    def testWeightedSumReduction(self):
        self.setup()
        weights = [
            tf.constant(1.0, shape=[self.batch_size])
            for _ in range(self.sequence_length)
        ]
        # Give the last element in the sequence zero weight.
        weights[-1] = tf.constant(0.0, shape=[self.batch_size])
        self.weights = tf.stack(weights, axis=1)
        with self.cached_session(use_gpu=True):
            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=True,
                sum_over_batch=True)
            average_loss_per_example = seq_loss(self.targets, self.logits,
                                                self.weights)
            res = self.evaluate(average_loss_per_example)
            self.assertAllClose(self.expected_loss, res)

            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=False,
                sum_over_batch=True)
            average_loss_per_sequence = seq_loss(self.targets, self.logits,
                                                 self.weights)
            res = self.evaluate(average_loss_per_sequence)
            compare_per_sequence = np.full((self.sequence_length),
                                           self.expected_loss)
            # The last timestep has zero weight, so its entry in the
            # per-timestep loss is zero.
            compare_per_sequence[-1] = 0.
            self.assertAllClose(compare_per_sequence, res)

            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=True,
                sum_over_batch=False)
            average_loss_per_batch = seq_loss(self.targets, self.logits,
                                              self.weights)
            res = self.evaluate(average_loss_per_batch)
            compare_per_batch = np.full((self.batch_size), self.expected_loss)
            self.assertAllClose(compare_per_batch, res)

            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=False,
                sum_over_batch=False)
            total_loss = seq_loss(self.targets, self.logits, self.weights)
            res = self.evaluate(total_loss)
            compare_total = np.full((self.batch_size, self.sequence_length),
                                    self.expected_loss)
            # The last timestep has zero weight, so the last column of
            # the per-token loss is zero.
            compare_total[:, -1] = 0
            self.assertAllClose(compare_total, res)
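Note how the zero weight on the last timestep shows up in each reduction above: average_loss_per_sequence and the unreduced total_loss are zero in the last position, while average_loss_per_example and average_loss_per_batch still equal expected_loss. These assertions only hold if the summed losses are normalized by the total weight, so zero-weighted steps drop out of the averages instead of dragging them down.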
Example #2
def test_sum_reduction():
    (
        batch_size,
        sequence_length,
        _,
        logits,
        targets,
        weights,
        expected_loss,
    ) = get_test_data()
    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=True,
        sum_over_batch=True,
    )
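    # Summing over both axes yields a scalar: the weighted loss sum
    # divided by the total weight, i.e. the per-token average.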
    average_loss_per_example = seq_loss(targets, logits, weights)
    res = average_loss_per_example.numpy()
    np.testing.assert_allclose(expected_loss, res, atol=1e-6, rtol=1e-6)

    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=False,
        sum_over_batch=True,
    )
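    # Summing over the batch only yields one value per timestep,
    # shape [sequence_length].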
    average_loss_per_sequence = seq_loss(targets, logits, weights)
    res = average_loss_per_sequence.numpy()
    compare_per_sequence = np.full((sequence_length), expected_loss)
    np.testing.assert_allclose(compare_per_sequence, res, atol=1e-6, rtol=1e-6)

    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=True,
        sum_over_batch=False,
    )
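    # Summing over timesteps only yields one value per sequence,
    # shape [batch_size].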
    average_loss_per_batch = seq_loss(targets, logits, weights)
    res = average_loss_per_batch.numpy()
    compare_per_batch = np.full((batch_size), expected_loss)
    np.testing.assert_allclose(compare_per_batch, res, atol=1e-6, rtol=1e-6)

    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=False,
        sum_over_batch=False,
    )
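    # With no reduction, the loss keeps its per-token shape
    # [batch_size, sequence_length].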
    total_loss = seq_loss(targets, logits, weights)
    res = total_loss.numpy()
    compare_total = np.full((batch_size, sequence_length), expected_loss)
    np.testing.assert_allclose(compare_total, res, atol=1e-6, rtol=1e-6)
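The eager-mode tests above unpack a seven-tuple from a get_test_data helper that is not included in these snippets. Below is a minimal sketch of what such a helper could look like; the tuple layout and tensor shapes match how the tests unpack it, but the concrete values (uniform logits, all-ones weights) are assumptions, chosen so that the expected per-token loss is log(number_of_classes):

def get_test_data():
    # Hypothetical fixture: only the tuple layout and tensor shapes are
    # taken from the tests above; the concrete values are assumptions.
    batch_size = 2
    sequence_length = 3
    number_of_classes = 5
    # Uniform logits make the softmax uniform, so the cross-entropy of
    # every token is log(number_of_classes) regardless of the target.
    logits = tf.zeros([batch_size, sequence_length, number_of_classes])
    targets = tf.zeros([batch_size, sequence_length], dtype=tf.int32)
    weights = tf.ones([batch_size, sequence_length])
    expected_loss = np.log(number_of_classes)  # ~1.609 for 5 classes
    return (
        batch_size,
        sequence_length,
        number_of_classes,
        logits,
        targets,
        weights,
        expected_loss,
    )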
Example #3
    def testSumReduction(self):
        with self.cached_session(use_gpu=True):
            self.setup()
            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=True,
                sum_over_batch=True,
            )
            average_loss_per_example = seq_loss(self.targets, self.logits,
                                                self.weights)
            res = self.evaluate(average_loss_per_example)
            self.assertAllClose(self.expected_loss, res)

            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=False,
                sum_over_batch=True,
            )
            average_loss_per_sequence = seq_loss(self.targets, self.logits,
                                                 self.weights)
            res = self.evaluate(average_loss_per_sequence)
            compare_per_sequence = np.full((self.sequence_length),
                                           self.expected_loss)
            self.assertAllClose(compare_per_sequence, res)

            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=True,
                sum_over_batch=False,
            )
            average_loss_per_batch = seq_loss(self.targets, self.logits,
                                              self.weights)
            res = self.evaluate(average_loss_per_batch)
            compare_per_batch = np.full((self.batch_size), self.expected_loss)
            self.assertAllClose(compare_per_batch, res)

            seq_loss = loss.SequenceLoss(
                average_across_timesteps=False,
                average_across_batch=False,
                sum_over_timesteps=False,
                sum_over_batch=False,
            )
            total_loss = seq_loss(self.targets, self.logits, self.weights)
            res = self.evaluate(total_loss)
            compare_total = np.full((self.batch_size, self.sequence_length),
                                    self.expected_loss)
            self.assertAllClose(compare_total, res)
Example #4
def test_sequence_loss_class(average_across_timesteps, average_across_batch):

    (
        batch_size,
        sequence_length,
        _,
        logits,
        targets,
        weights,
        expected_loss,
    ) = get_test_data()
    seq_loss = loss.SequenceLoss(
        average_across_timesteps=average_across_timesteps,
        average_across_batch=average_across_batch,
        sum_over_timesteps=False,
        sum_over_batch=False,
    )
    average_loss_per_example = seq_loss(targets, logits, weights)
    res = average_loss_per_example.numpy()
    if average_across_timesteps and average_across_batch:
        expected = expected_loss
    elif not average_across_timesteps and average_across_batch:
        expected = np.full(sequence_length, expected_loss)
    elif average_across_timesteps and not average_across_batch:
        expected = np.full(batch_size, expected_loss)
    elif not average_across_timesteps and not average_across_batch:
        expected = np.full((batch_size, sequence_length), expected_loss)

    np.testing.assert_allclose(res, expected, atol=1e-6, rtol=1e-6)
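The test above receives its two averaging flags as arguments; the decorator that supplies them is not included in the snippet. Presumably it is driven by pytest parametrization, along these lines (a sketch, not necessarily the library's actual decorator):

@pytest.mark.parametrize("average_across_timesteps", [True, False])
@pytest.mark.parametrize("average_across_batch", [True, False])
def test_sequence_loss_class(average_across_timesteps, average_across_batch):
    ...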
Example #5
    def testAmbiguousOrder(self):
        with self.assertRaisesRegex(ValueError, 'because of ambiguous order'):
            with self.cached_session(use_gpu=True):
                self.setup()
                seq_loss = loss.SequenceLoss(
                    average_across_timesteps=False,
                    average_across_batch=True,
                    sum_over_timesteps=True,
                    sum_over_batch=False)
                self.evaluate(
                    seq_loss(self.targets, self.logits, self.weights))
Example #6
def test_ambiguous_order():
    with pytest.raises(ValueError, match="because of ambiguous order"):
        _, _, _, logits, targets, weights, _ = get_test_data()
        seq_loss = loss.SequenceLoss(
            average_across_timesteps=False,
            average_across_batch=True,
            sum_over_timesteps=True,
            sum_over_batch=False,
        )
        seq_loss(targets, logits, weights).numpy()
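Both of these tests pin down the same constraint: averaging across one axis while summing over the other is rejected, because the result would depend on which reduction runs first, hence the "ambiguous order" in the error message. Reducing both axes is only valid when the two reductions are of the same kind, as in the sum/sum configurations of the earlier examples.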
Example #7
def test_keras_compatibility():
    """To test the compatibility of SequenceLoss with Keras's built-in
    training loops, we create a fake model that always outputs a
    predefined set of logits.

    We then check that the calculated loss equals the expected loss.
    Since the fake model has no trainable parameters, it outputs the
    same loss value no matter how many steps it is trained for.
    """
    (
        batch_size,
        sequence_length,
        number_of_classes,
        logits,
        targets,
        weights,
        expected_loss,
    ) = get_test_data()
    targets = tf.one_hot(targets, depth=number_of_classes)

    def return_logits(x):
        logits_single_row = logits[0, :, :]
        logits_batch = tf.tile(tf.expand_dims(logits_single_row, 0),
                               [tf.shape(x)[0], 1, 1])
        return logits_batch

    inp = tf.keras.layers.Input(shape=(sequence_length, ))
    out = tf.keras.layers.Lambda(
        return_logits,
        output_shape=(sequence_length, number_of_classes),
    )(inp)
    model = tf.keras.models.Model(inp, out)

    loss_obj = loss.SequenceLoss()
    model.compile(optimizer="adam",
                  loss=loss_obj,
                  sample_weight_mode="temporal")

    # This is a fake input.
    x = tf.ones(shape=(batch_size, sequence_length))

    h = model.fit(
        x,
        targets,
        sample_weight=weights,
        batch_size=batch_size,
        steps_per_epoch=1,
    )

    calculated_loss = h.history["loss"][0]
    np.testing.assert_allclose(calculated_loss,
                               expected_loss,
                               rtol=1e-6,
                               atol=1e-6)
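The sample_weight_mode="temporal" argument in compile tells Keras to expect two-dimensional sample weights of shape [batch_size, sequence_length], which is how the per-timestep weights tensor reaches SequenceLoss during fit.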
Example #8
    def testKerasCompatibility(self):
        """To test the compatibility of SequenceLoss with Keras's built-in
        training loops, we create a fake model that always outputs a
        predefined set of logits.

        We then check that the calculated loss equals the expected loss.
        Since the fake model has no trainable parameters, it outputs the
        same loss value no matter how many steps it is trained for.
        """
        with self.cached_session(use_gpu=True):
            self.setup()

            def return_logits(x):
                batch_size = tf.shape(x)[0]
                logits_single_row = self.logits[0, :, :]
                logits_batch = tf.tile(tf.expand_dims(logits_single_row, 0),
                                       [batch_size, 1, 1])
                return logits_batch

            inp = tf.keras.layers.Input(shape=(self.sequence_length, ))
            out = tf.keras.layers.Lambda(
                return_logits,
                output_shape=(self.sequence_length, self.number_of_classes),
            )(inp)
            model = tf.keras.models.Model(inp, out)

            loss_obj = loss.SequenceLoss()
            model.compile(optimizer="adam",
                          loss=loss_obj,
                          sample_weight_mode="temporal")

            # This is a fake input.
            x = tf.ones(shape=(self.batch_size, self.sequence_length))

            h = model.fit(
                x,
                self.targets,
                sample_weight=self.weights,
                batch_size=self.batch_size,
                steps_per_epoch=1,
            )

            calculated_loss = h.history["loss"][0]
            self.assertAllClose(calculated_loss, self.expected_loss)
Example #9
def test_weighted_sum_reduction():
    (
        batch_size,
        sequence_length,
        _,
        logits,
        targets,
        _,
        expected_loss,
    ) = get_test_data()
    weights = [
        tf.constant(1.0, shape=[batch_size]) for _ in range(sequence_length)
    ]
    # Give the last element in the sequence zero weight.
    weights[-1] = tf.constant(0.0, shape=[batch_size])
    weights = tf.stack(weights, axis=1)
    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=True,
        sum_over_batch=True,
    )
    average_loss_per_example = seq_loss(targets, logits, weights)
    res = average_loss_per_example.numpy()
    np.testing.assert_allclose(expected_loss, res, rtol=1e-6, atol=1e-6)

    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=False,
        sum_over_batch=True,
    )
    average_loss_per_sequence = seq_loss(targets, logits, weights)
    res = average_loss_per_sequence.numpy()
    compare_per_sequence = np.full(sequence_length, expected_loss)
    # The last timestep has zero weight, so its entry in the
    # per-timestep loss is zero.
    compare_per_sequence[-1] = 0.0
    np.testing.assert_allclose(compare_per_sequence, res, rtol=1e-6, atol=1e-6)

    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=True,
        sum_over_batch=False,
    )
    average_loss_per_batch = seq_loss(targets, logits, weights)
    res = average_loss_per_batch.numpy()
    compare_per_batch = np.full(batch_size, expected_loss)
    np.testing.assert_allclose(compare_per_batch, res, rtol=1e-6, atol=1e-6)

    seq_loss = loss.SequenceLoss(
        average_across_timesteps=False,
        average_across_batch=False,
        sum_over_timesteps=False,
        sum_over_batch=False,
    )
    total_loss = seq_loss(targets, logits, weights)
    res = total_loss.numpy()
    compare_total = np.full((batch_size, sequence_length), expected_loss)
    # The last timestep has zero weight, so the last column of
    # the per-token loss is zero.
    compare_total[:, -1] = 0
    np.testing.assert_allclose(compare_total, res, rtol=1e-6, atol=1e-6)