Example #1
    def test_category_cross_entropy(self):
        layer = tl.CategoryCrossEntropy()
        targets = np.array([0, 1])

        # Near-perfect prediction (for both items in batch).
        model_outputs = np.array([[9., 2., 0., -2.], [2., 9., 0., -2.]])
        loss = layer([model_outputs, targets])
        self.assertAlmostEqual(loss, .001, places=3)

        # More right than wrong (for both items in batch).
        model_outputs = np.array([[2.2, 2., 0., -2.], [2., 2.2, 0., -2.]])
        loss = layer([model_outputs, targets])
        self.assertAlmostEqual(loss, .665, places=3)

        # First item near perfect, second item more right than wrong.
        model_outputs = np.array([[9., 2., 0., -2.], [2., 2.2, 0., -2.]])
        loss = layer([model_outputs, targets])
        self.assertAlmostEqual(loss, .333, places=3)
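
For reference, the expected values above can be reproduced by hand: CategoryCrossEntropy treats the model outputs as logits, applies log-softmax, and averages the negative log-likelihood of the target classes over the batch. A minimal NumPy sketch of that math (an illustration, not the Trax implementation):

import numpy as np

def category_cross_entropy(logits, targets):
    """Mean negative log-likelihood of `targets` under softmax(logits)."""
    # Log-softmax, computed stably by subtracting the per-row maximum.
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    # Pick each item's target-class log-probability and average over the batch.
    return -log_probs[np.arange(len(targets)), targets].mean()

logits = np.array([[9., 2., 0., -2.], [2., 2.2, 0., -2.]])
print(category_cross_entropy(logits, np.array([0, 1])))  # ~0.333
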
Example #2
    def test_category_cross_entropy_with_label_smoothing(self):
        epsilon = 0.01
        layer = tl.CategoryCrossEntropy(label_smoothing=epsilon)
        targets = np.array([0, 1])

        # Near-perfect prediction (for both items in batch).
        model_outputs = np.array([[9., 2., 0., -2.], [2., 9., 0., -2.]])
        loss = layer([model_outputs, targets])
        self.assertAlmostEqual(loss, .069, places=3)

        # More right than wrong (for both items in batch).
        model_outputs = np.array([[2.2, 2., 0., -2.], [2., 2.2, 0., -2.]])
        loss = layer([model_outputs, targets])
        self.assertAlmostEqual(loss, .682, places=3)

        # First item near perfect, second item more right than wrong.
        model_outputs = np.array([[9., 2., 0., -2.], [2., 2.2, 0., -2.]])
        loss = layer([model_outputs, targets])
        self.assertAlmostEqual(loss, .375, places=3)
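
With label_smoothing=epsilon, the hard one-hot target is softened before the cross-entropy is taken: each class receives epsilon / n_classes of the probability mass, and the remaining 1 - epsilon goes to the true class. A minimal, self-contained NumPy sketch of that construction (again an illustration consistent with the test values, not the layer's source):

import numpy as np

def smoothed_category_cross_entropy(logits, targets, epsilon):
    """Cross-entropy against label-smoothed one-hot targets."""
    n_classes = logits.shape[-1]
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    # Spread epsilon of the target probability mass uniformly over all classes.
    smoothed = np.eye(n_classes)[targets] * (1. - epsilon) + epsilon / n_classes
    return -(smoothed * log_probs).sum(axis=-1).mean()

logits = np.array([[9., 2., 0., -2.], [2., 9., 0., -2.]])
print(smoothed_category_cross_entropy(logits, np.array([0, 1]), 0.01))  # ~0.069
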
Example #3
    # Shuffle and bucket the input sequences by length into padded batches.
    inputs_test = trax.data.Serial(
        trax.data.Shuffle(),
        trax.data.BucketByLength(boundaries=[4, 8, 16, 32, 89],
                                 batch_sizes=[64, 64, 32, 32, 16]),
    )(dt.get_input_sequence_and_gt(train_data, len_input, batch_size))
    recommender = RecommenderTransformer(n_classes_in=len(classes),
                                         embedding_size=embedding_size,
                                         n_out_classes=len(classes),
                                         dropout_rate=dropout_rate)

    n_train_b = len(train_data) // batch_size
    train_task = ts.training.TrainTask(
        labeled_data=inputs_train,
        loss_layer=tl.CategoryCrossEntropy(),
        optimizer=trax.optimizers.Adam(learning_rate),
        n_steps_per_checkpoint=400,  # Report results every 400 training steps.
    )
    n_eval_b = len(val_data) // batch_size
    # Evaluation task.
    eval_task = ts.training.EvalTask(
        labeled_data=inputs_test,
        metrics=[tl.CategoryCrossEntropy(),
                 tl.CategoryAccuracy()],
        n_eval_batches=n_eval_b)

    training_loop = ts.training.Loop(recommender,
                                     train_task,
                                     eval_tasks=[eval_task],