def test_activity_regularizer_batch_independent(self):
    inputs = layers.Input(shape=(10,))
    x = layers.Dense(10, activation='relu', activity_regularizer='l2')(inputs)
    outputs = layers.Dense(1, activation='sigmoid')(x)
    model = Model(inputs, outputs)

    optimizer = RMSPropOptimizer(learning_rate=0.001)
    # No loss is passed to compile, so the only loss the model reports is
    # the activity-regularization penalty added by the first Dense layer.
    model.compile(
        optimizer,
        run_eagerly=testing_utils.should_run_eagerly())

    # The regularization penalty must be averaged over the batch dimension,
    # so doubling the batch size should leave the reported loss unchanged.
    loss_small_batch = model.test_on_batch(np.ones((10, 10), 'float32'))
    loss_big_batch = model.test_on_batch(np.ones((20, 10), 'float32'))
    self.assertAlmostEqual(loss_small_batch, loss_big_batch, places=4)
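The same batch-independence check can be reproduced outside the test harness. A minimal standalone sketch using only public tf.keras APIs (the layer sizes and batch sizes below are illustrative, not taken from the test):

import numpy as np
import tensorflow as tf

inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10, activation='relu',
                          activity_regularizer='l2')(inputs)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
model = tf.keras.Model(inputs, outputs)
# Compile with no loss: test_on_batch then reports only the
# activity-regularization penalty.
model.compile(tf.keras.optimizers.RMSprop(learning_rate=0.001))

loss_small = model.test_on_batch(np.ones((10, 10), 'float32'))
loss_big = model.test_on_batch(np.ones((20, 10), 'float32'))
print(abs(loss_small - loss_big) < 1e-4)  # expected: True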
Example #2
    def test_loss_with_sample_weight_in_layer_call(self):
        class MyLayer(layers.Layer):
            def __init__(self):
                super(MyLayer, self).__init__()
                self.bias = testing_utils.Bias()

            def call(self, inputs):
                # inputs is [features, targets, sample_weights].
                out = self.bias(inputs[0])
                # Register the sample-weighted MAE twice: once via the loss
                # class, which applies the weights itself...
                self.add_loss(MAE()(inputs[1], out, inputs[2]))
                # ...and once via the functional form, weighting manually.
                self.add_loss(
                    math_ops.reduce_mean(inputs[2] * mae(inputs[1], out)))
                return out

        inputs = Input(shape=(1,))
        targets = Input(shape=(1,))
        sw = Input(shape=(1,))

        outputs = MyLayer()([inputs, targets, sw])
        model = Model([inputs, targets, sw], outputs)
        # predict works on an uncompiled model; compile is only needed for
        # the fit/evaluate calls below.
        model.predict([self.x, self.y, self.w])
        model.compile(
            optimizer_v2.gradient_descent.SGD(0.05),
            run_eagerly=testing_utils.should_run_eagerly(),
            experimental_run_tf_function=testing_utils.should_run_tf_function())

        history = model.fit([self.x, self.y, self.w], batch_size=3, epochs=5)
        self.assertAllClose(history.history['loss'], [2., 1.8, 1.6, 1.4, 1.2],
                            1e-3)

        output = model.evaluate([self.x, self.y, self.w])
        self.assertAlmostEqual(output, 1.0, 3)

        output = model.test_on_batch([self.x, self.y, self.w])
        self.assertAlmostEqual(output, 1.0, 3)
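The pattern this test exercises, registering a sample-weighted loss from inside a layer's call via add_loss, can be sketched with public tf.keras APIs alone. WeightedMAELayer and the toy data below are illustrative stand-ins, not the test's helpers:

import numpy as np
import tensorflow as tf

class WeightedMAELayer(tf.keras.layers.Layer):
    # Illustrative layer: adds a sample-weighted MAE to the model loss.
    def call(self, inputs):
        preds, targets, sample_weight = inputs
        self.add_loss(tf.reduce_mean(sample_weight * tf.abs(targets - preds)))
        return preds

inp = tf.keras.Input(shape=(1,))
tgt = tf.keras.Input(shape=(1,))
sw = tf.keras.Input(shape=(1,))
out = WeightedMAELayer()([tf.keras.layers.Dense(1)(inp), tgt, sw])
model = tf.keras.Model([inp, tgt, sw], out)
model.compile(tf.keras.optimizers.SGD(0.05))  # trains on the added loss only

x = np.ones((6, 1), 'float32')
y = 2. * np.ones((6, 1), 'float32')
w = np.ones((6, 1), 'float32')
model.fit([x, y, w], batch_size=3, epochs=2)

Because targets and sample weights enter the graph as inputs, no loss is passed to compile; fit minimizes only what the layer registered via add_loss.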
Example #3
    # Sample a random training batch. (Assumed preamble: the snippet begins
    # mid-loop, so the index-sampling lines here mirror the validation
    # sampling below.)
    idx = np.random.randint(0, len(train_x1), train_batch_size)
    images_idx_x1 = train_x1[idx]
    images_idx_x2 = train_x2[idx]
    images_x1 = images[images_idx_x1] / 255.
    images_x2 = images[images_idx_x2] / 255.
    images_fov = train_fov[idx]
    result = train_y[idx]

    # One optimizer step; logs holds [loss, *metrics] for this batch.
    logs = model.train_on_batch(x=[images_x1, images_x2, images_fov], y=result)
    sum_logs.append(logs)

    if batch % 200 == 0 and batch > 0:
        # Check the model on a random batch of the validation data.
        valid_idx = np.random.randint(0, len(test_x1), train_batch_size)
        valid_images_idx_x1 = test_x1[valid_idx]
        valid_images_idx_x2 = test_x2[valid_idx]
        valid_images_x1 = images[valid_images_idx_x1] / 255.
        valid_images_x2 = images[valid_images_idx_x2] / 255.
        # NB: the field of view is taken from the training array even for
        # this validation batch; a separate test_fov may be intended.
        valid_images_fov = train_fov[valid_idx]
        valid_result = test_y[valid_idx]

        v_loss = model.test_on_batch(
            x=[valid_images_x1, valid_images_x2, valid_images_fov],
            y=valid_result)

        # Average the training logs accumulated since the last report.
        avg_logs = np.average(sum_logs, axis=0)
        sum_logs = []

        print('%d [loss: %f]' % (batch, avg_logs[0]))
        write_log(callback, train_names, avg_logs, batch)
        write_log(callback, val_names, v_loss, batch)

    # Periodically checkpoint the model.
    if batch % 5000 == 0 and batch > 0:
        save_models(model)
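For reference, the overall structure of this manual loop (batch sampling, train_on_batch, periodic test_on_batch with log averaging) reduces to a pattern like the following sketch; the model, data, and reporting interval are placeholders, not the original's:

import numpy as np
import tensorflow as tf

# Stand-in model and data; any compiled Keras model works the same way.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(tf.keras.optimizers.Adam(1e-3), loss='mse')

x_train = np.random.rand(1000, 4).astype('float32')
y_train = np.random.rand(1000, 1).astype('float32')
x_test = np.random.rand(200, 4).astype('float32')
y_test = np.random.rand(200, 1).astype('float32')

batch_size, sum_logs = 32, []
for batch in range(1, 1001):
    # Draw a random training batch and take one optimizer step.
    idx = np.random.randint(0, len(x_train), batch_size)
    sum_logs.append(model.train_on_batch(x_train[idx], y_train[idx]))
    if batch % 200 == 0:
        # Evaluate one random validation batch and report averaged logs.
        vidx = np.random.randint(0, len(x_test), batch_size)
        v_loss = model.test_on_batch(x_test[vidx], y_test[vidx])
        print('%d [train: %f] [valid: %f]' % (batch, np.mean(sum_logs), v_loss))
        sum_logs = []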