# Example 1
    def test_1dim(self):
        """Fit a tiny 1-input network on a step function; expect convergence
        and near-zero test loss."""

        def step(x):
            # Binary target: 1 above the 0.5 threshold, else 0.
            return 1 if x > 0.5 else 0

        def build():
            inp = Input(shape=(1, ))
            hidden = Dense(
                1,
                kernel_initializer=keras.initializers.RandomUniform(0.0, 1.0),
                activation='relu')(inp)
            out = Dense(1, activation='sigmoid')(hidden)
            return Model(inp, out)

        model = build()
        model.compile(optimizer=GradientObserver(), loss='mse')
        optimizer = ScipyOptimizer(model)
        samples = np.random.rand(100)
        targets = np.vectorize(step)(samples)
        X_train, X_test, y_train, y_test = train_test_split(
            samples, targets, test_size=0.20, random_state=42)

        result, _ = optimizer.fit(X_train, y_train, epochs=20, verbose=False)
        self.assertTrue(result['success'])
        self.assertLessEqual(model.test_on_batch(X_test, y_test), 1.0e-5)
# Example 2
 def test_fit_generator(self):
     """Train an embedding model from a sparse-matrix generator and verify
     the final objective value is small."""
     mat = make_test_matrix((10, 10), 50)
     coo_gen = test_generator(mat.tocoo())
     emb_model = make_embedding_model(mat.shape, 3)
     optimizer = ScipyOptimizer(emb_model)
     result, _ = optimizer.fit_generator(coo_gen, epochs=200, verbose=False)
     self.assertLess(result['fun'], 1.0e-3)
# Example 3
    def test_mult_inputs(self):
        """Fit a linear function fed through two separate model inputs."""

        def target_fn(a, b):
            # Plane: z = 2a + 4b + 1.
            return 2.0 * a + 4.0 * b + 1.0

        def build():
            in_x = Input(shape=(1, ))
            in_y = Input(shape=(1, ))
            merged = Concatenate()([in_x, in_y])
            out = Dense(1)(merged)
            return Model([in_x, in_y], out)

        model = build()
        model.compile(optimizer=GradientObserver(), loss='mse')
        optimizer = ScipyOptimizer(model)
        xs = np.random.rand(10)
        ys = np.random.rand(10)
        zs = np.vectorize(target_fn)(xs, ys)

        result, _ = optimizer.fit([xs, ys], zs, epochs=100, verbose=False)
        self.assertTrue(result['success'])
# Example 4
    def test_non_trainable(self):
        """BatchNormalization uses non-trainable weights.
        """
        model = Sequential()
        model.add(Dense(3, use_bias=False, input_dim=4))
        model.add(BatchNormalization())
        model.add(Dense(1, use_bias=False))
        model.compile(optimizer=GradientObserver(), loss='mse')

        def target(vec):
            a, b, c, d = vec
            return a * b + 2 * b + 3 * c + d

        inputs = np.random.rand(10, 4)
        outputs = np.array([target(row) for row in inputs])

        opt = ScipyOptimizer(model)
        result, _ = opt.fit(inputs, outputs, epochs=50, verbose=False)
        # NOTE(review): the bound is a loose 1.0e3; combined with the
        # status == 2 assertion this looks like the optimizer is expected
        # NOT to fully converge here — confirm the bound is intentional
        # and not a typo for 1.0e-3.
        self.assertLessEqual(result['fun'], 1.0e3)
        self.assertEqual(result['status'], 2, result['message'])
# Example 5
    def test_2layer(self):
        """Fit a two-layer linear network and verify the predictions.

        Checks the total *absolute* prediction error: the original signed
        sum let positive and negative residuals cancel, so even a poor fit
        could slip under the threshold.
        """
        model = Sequential()
        model.add(Dense(3, use_bias=False, input_dim=4))
        model.add(Dense(1, use_bias=False))
        model.compile(optimizer=GradientObserver(), loss='mse')

        def fn(vec):
            # Target: mildly non-linear function of the four inputs.
            a, b, c, d = vec
            return a * b + 2 * b + 3 * c + d

        inputs = np.random.rand(10, 4)
        outputs = np.zeros(inputs.shape[0])
        for i in range(inputs.shape[0]):
            outputs[i] = fn(inputs[i, :])

        opt = ScipyOptimizer(model)
        opt.fit(inputs, outputs, epochs=15, verbose=False)

        pred = model.predict(inputs)
        delta = outputs - pred.reshape(-1)
        self.assertEqual(delta.shape, outputs.shape)
        # Absolute residuals: signed errors must not cancel each other out.
        self.assertLess(np.abs(delta).sum(), 0.01)
# Example 6
    def test_val_data(self):
        """Fit with a validation split and verify validation metrics are
        recorded in the training history.

        Removed a leftover debug ``print(hist.history.keys())`` that
        cluttered the test output.
        """

        def test_fn(x):
            # Step target: 2 above the 0.8 threshold, else 0.
            if x > 0.8:
                return 2
            return 0

        def make_model():
            inp = Input(shape=(1, ))
            h_layer = Dense(
                1,
                kernel_initializer=keras.initializers.RandomUniform(0.0, 1.0),
                activation='relu')(inp)
            outp = Dense(1, activation='sigmoid')(h_layer)
            return Model(inp, outp)

        model = make_model()
        model.compile(optimizer=GradientObserver(),
                      loss='mse',
                      metrics=['mae'])
        opt = ScipyOptimizer(model)
        X = np.random.rand(200)
        y = np.vectorize(test_fn)(X)
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.20,
                                                            random_state=42)

        result, hist = opt.fit(X_train,
                               y_train,
                               epochs=50,
                               validation_data=(X_test, y_test),
                               verbose=False)
        self.assertLessEqual(result['fun'], 0.2)
        self.assertTrue('val_loss' in hist.history)
        # Metric key name differs across Keras versions.
        self.assertTrue('val_mean_absolute_error' in hist.history
                        or 'val_mae' in hist.history)
# Example 7
    def test_lr(self):
        """Fit a single linear layer and check the recovered coefficients.

        Uses the public ``model.layers`` attribute rather than the private
        ``model._layers`` (an implementation detail that can change across
        Keras versions), and the block's existing ``np`` alias instead of a
        bare ``numpy`` reference for consistency.
        """
        model = Sequential()
        model.add(Dense(1, use_bias=False, input_dim=4))
        model.compile(optimizer=GradientObserver(), loss='mse')

        def fn(vec):
            # Linear target with known coefficients [4, 2, 3, 1].
            a, b, c, d = vec
            return 4 * a + 2 * b + 3 * c + d

        inputs = np.random.rand(10, 4)
        outputs = np.zeros(inputs.shape[0])
        for i in range(inputs.shape[0]):
            outputs[i] = fn(inputs[i, :])

        opt = ScipyOptimizer(model)
        result, hist = opt.fit(inputs, outputs, epochs=30, verbose=False)
        self.assertTrue(result['success'])
        self.assertTrue('loss' in hist.history)

        # Only layers that actually hold weights (filters out input layers).
        layers = [layer for layer in model.layers if layer.weights]
        w = layers[0].get_weights()[0].reshape(-1)
        w_p = opt._collect_weights()
        np.testing.assert_almost_equal(w, w_p)
        np.testing.assert_almost_equal(w, [4.0, 2.0, 3.0, 1.0], decimal=4)