Example No. 1
    def test_ManyErrors(self):
        model = Seq(
            [Linear(2, 3, initialize='ones'),
             Linear(3, 1, initialize='ones')])
        x = np.random.rand(2)
        y = model.forward(x)
        model.backward(np.array([1.]))
Example No. 2
    def test_Expand(self):
        model = Seq([
            Linear(2, 3, initialize='ones'),
            Linear(3, 2, initialize='ones'),
        ])
        x = np.random.rand(2)
        model.forward(x)
        back = model.backward(np.ones(2))
Example No. 3
    def test_LinearLayerNumericalGradientCheck(self):
        x = np.random.rand(3)

        model = Seq()
        model.add(Linear(3, 2, initialize='ones'))

        num_grad = numerical_gradient.calc(model.forward, x)
        deriv_grad = model.backward(np.array([1, 1]))
        num_grad = np.sum(num_grad, axis=0)

        numerical_gradient.assert_are_similar(deriv_grad, num_grad)
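The `numerical_gradient` helper itself is not shown in these examples. A minimal central-difference sketch of what its `calc` function might look like follows; the (outputs × inputs) Jacobian layout is an assumption, not the library's actual implementation (the tests further down sum over `axis=1`, so the real layout may be transposed):

    import numpy as np

    def calc(f, x, eps=1e-5):
        # Central-difference Jacobian of f at x, one input component at a time.
        y = np.atleast_1d(f(x))
        jac = np.zeros((y.size, x.size))
        for i in range(x.size):
            step = np.zeros_like(x)
            step[i] = eps
            jac[:, i] = (np.atleast_1d(f(x + step)) -
                         np.atleast_1d(f(x - step))) / (2 * eps)
        return jac

Under that layout, `np.sum(num_grad, axis=0)` collapses the Jacobian over its output axis, giving the gradient of the summed outputs, which is exactly what `backward(np.array([1, 1]))` computes analytically.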
Example No. 4
class WxBiasLinear(Layer):
    def __init__(self, in_size, out_size, initialize_W, initialize_b):
        self.Wx = Wx(in_size, out_size, initialize_W)
        self.bias = PlusBias(out_size, initialize_b)
        self.model = Seq(self.Wx, self.bias)

    def forward(self, x, is_training=False):
        return self.model.forward(x, is_training)

    def backward(self, dJdy):
        return self.model.backward(dJdy)

    def update_weights(self, optimizer):
        return self.model.update_weights(optimizer)
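A hypothetical usage sketch of this wrapper (the sizes and initializers are illustrative, not taken from the source):

    # Hypothetical example: a 2-in, 3-out affine layer built from Wx followed by PlusBias.
    layer = WxBiasLinear(2, 3, initialize_W='ones', initialize_b='zeros')
    x = np.array([1., 2.])
    y = layer.forward(x)               # runs Wx, then adds the bias
    grad = layer.backward(np.ones(3))  # backpropagates through both sub-layers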
Example No. 5
    def test_iris(self):
        scores = []
        for i in range(1):
            hidden = 50
            l1 = Linear(4, hidden, initialize='ones')
            l2 = Linear(hidden, 3, initialize='ones')
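            # scale the all-ones weights down to near zero (presumably to keep
            # the sigmoid from saturating at the start of training)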
            l1.W *= 0.000000001
            l2.W *= 0.00000001
            model = Seq(l1, Sigmoid, l2)
            loss = CrossEntropyLoss()

            trainer = OnlineTrainer()
            losses = trainer.train(model,
                                   self.train_set,
                                   epochs=100,
                                   loss=loss,
                                   optimizer=SGD(learning_rate=0.01))

            score = loss.test_score(model, self.train_set)
            print("hidden=%f score=%f" % (hidden, score))
            scores.append(score)
            self.plot_loss_history(losses)
            plt.show()

        self.assertGreaterEqual(numpy.mean(scores), 94.)
Example No. 6
    def test_LinearSigmoid(self):
        model = Seq()
        model.add(Linear(2, 1, initialize='ones'))
        model.add(Sigmoid())
        data = np.array([2., 3.])
        out = model.forward(data)
        self.assertEqual(round(out, 2), 1.)
Example No. 7
    def test_LinearSoftmax(self):
        model = Seq()
        model.add(Linear(2, 1))
        model.add(Softmax())
        data = np.array([2., 3.])
        out = model.forward(data)
        self.assertEqual(out, 1.)
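With a single output unit, `Softmax` necessarily returns 1.0 (e^z divided by itself), so the assertion holds no matter what the uninitialized `Linear` weights are.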
Example No. 8
    def test_Perceptron(self):
        train_set, test_set = gen_data()

        model = Seq([
            Linear(2, 5, initialize='random'),
            Sigmoid(),
            Linear(5, 1, initialize='random'),
            Sigmoid(),
        ])

        OnlineTrainer().train(
            model,
            train_set=train_set,
            loss=SquaredLoss(),
            # optimizer=SGD(learning_rate=0.1),
            optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9),
            # optimizer=AdaGrad(learning_rate=0.9),
            # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
            epochs=200,
            save_progress=False)

        # model.learn_minibatch(
        # input_data=train_data,
        # target_data=train_targets,
        # loss=SquaredLoss(),
        # batch_size=5,
        # # optimizer=SGD(learning_rate=0.1),
        # # optimizer=MomentumSGD(learning_rate=0.1, momentum=0.9),
        # optimizer=AdaGrad(learning_rate=0.9),
        # # optimizer=RMSProp(learning_rate=0.1, decay_rate=0.9),
        #
        # epochs=100,
        # save_progress=True)

        model.save_to_file('perceptron.pkl')

        scatter_test_data(test_set, model)

        # model.plot_errors_history()
        # model.plot_loss_gradient_history()
        plt.show()
Example No. 9
    def test_TwoDifferentModelsShouldHaveDifferentGradients(self):
        x = np.random.rand(5)

        real_model = Seq([
            Linear(5, 3, initialize='ones'),
            Tanh(),
            Linear(3, 5, initialize='ones'),
            Tanh()
        ])
        y = real_model.forward(x)
        real_grad = real_model.backward(np.ones(5))

        num_model = Seq([
            Linear(5, 3, initialize='ones'),
            Relu(),
            Linear(3, 5, initialize='ones'),
            Relu()
        ])
        num_grad = numerical_gradient.calc(num_model.forward, x)
        num_grad = np.sum(num_grad, axis=1)
        self.assertFalse(numerical_gradient.are_similar(real_grad, num_grad))
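The analytic gradient comes from a Tanh network while the numerical gradient is computed on a Relu network of the same shape, so the similarity check is expected to fail; compare the matching-models test further down.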
Example No. 10
    def test_update_weights_layer_vs_syntax(self):
        x = np.array([1., 2., 3.])
        optimizer = SGD(0.1)

        W = np.random.rand(3, 3 + 1)

        linear_layer = layers.Linear(3, 3, initialize=W.copy())
        linear_layer_model = Seq(linear_layer, layers.Tanh)
        y = linear_layer_model.forward(x)
        back = linear_layer_model.backward(np.ones(3))

        var_x = Var('x')
        syntax_linear = Linear(3, 3, initialize=W.copy(), input=var_x)
        syntax_model = Tanh(syntax_linear)
        syntax_y = syntax_model.forward_variables({'x': x})
        syntax_back = syntax_model.backward_variables(np.ones(3))

        assert_array_equal(linear_layer.delta_W, syntax_linear.layer.delta_W)

        # update weights in both models
        linear_layer_model.update_weights(optimizer)
        syntax_model.update_weights(optimizer)

        assert_array_equal(y, syntax_y)
        assert_array_equal(back, syntax_back['x'])
        assert_array_equal(linear_layer.W, syntax_linear.layer.W)
Example No. 11
    def test_CheckMinibatchTrainerEqualsSimpleTrainer(self):
        train_set = [(np.random.rand(2), i) for i in range(3)]
        loss = SquaredLoss()
        epochs = 1
        optimizer = SGD(learning_rate=0.01)

        minibatch_model = Seq([Linear(2, 5, initialize='ones')])
        minibatch_trainer = MinibatchTrainer()
        minibatch_trainer.train_minibatches(minibatch_model,
                                            train_set,
                                            batch_size=1,
                                            loss=loss,
                                            epochs=epochs,
                                            optimizer=optimizer,
                                            shuffle=False)

        simple_model = Seq([Linear(2, 5, initialize='ones')])
        simple_trainer = OnlineTrainer()
        simple_trainer.train(simple_model, train_set, loss, epochs, optimizer)

        x = np.random.rand(2)

        simple_y = simple_model.forward(x)
        minibatch_y = minibatch_model.forward(x)

        assert_array_equal(simple_y, minibatch_y)
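With `batch_size=1` and `shuffle=False`, minibatch training visits the same examples in the same order as the online trainer, so the two models should end up with identical weights; comparing their forward passes on a fresh input confirms it.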
Example No. 12
    def test_compare_with_Linear(self):
        in_size = 2
        out_size = 3
        x = np.random.rand(in_size)
        # x = np.array([1., 1])
        optimizer = SGD(0.1)

        linear = Linear(in_size, out_size, initialize='zeros')

        wx = Wx(in_size, out_size, initialize='zeros')
        plusbias = PlusBias(out_size, initialize='zeros')
        wxbias = Seq(wx, plusbias)

        linear_y = linear.forward(x)
        wxbias_y = wxbias.forward(x)
        assert_array_equal(linear_y, wxbias_y)

        dJdy = np.random.rand(out_size)
        linear_grad = linear.backward(dJdy)
        wxbias_grad = wxbias.backward(dJdy)
        assert_array_equal(linear_grad, wxbias_grad)

        linear.update_weights(optimizer)
        wxbias.update_weights(optimizer)

        stack = np.vstack([plusbias.b.get(), wx.W.get().T]).T
        assert_array_equal(linear.W, stack)
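The final assertion relies on `Linear` storing an augmented weight matrix of shape (out_size, in_size + 1) with the bias in the first column (compare the `np.random.rand(3, 3 + 1)` initialization in the layer-vs-syntax test above): stacking `b` with the columns of `Wx.W` reconstructs that matrix.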
Example No. 13
    def test_Reduce(self):
        model = Seq(
            [Linear(3, 2, initialize='ones'),
             Linear(2, 2, initialize='ones')])
        x = np.random.rand(3)
        model.forward(x)
        model.backward(np.array([1., 1.]))
Example No. 14
    def test_TwoLinearLayersTanh(self):
        x = np.random.rand(5)

        real_model = Seq([
            Linear(5, 3, initialize='ones'),
            Tanh(),
            Linear(3, 5, initialize='ones'),
            Tanh()
        ])
        y = real_model.forward(x)
        real_grad = real_model.backward(np.ones(5))

        num_model = Seq([
            Linear(5, 3, initialize='ones'),
            Tanh(),
            Linear(3, 5, initialize='ones'),
            Tanh()
        ])
        num_grad = numerical_gradient.calc(num_model.forward, x)

        num_grad = np.sum(num_grad, axis=1)
        self.assertTrue(numerical_gradient.are_similar(real_grad, num_grad))
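This is the positive counterpart of the mismatched-models test above: both stacks are identical Tanh networks, so the analytic and numerical gradients agree.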
Example No. 15
    def run(self,
            batch_size=10,
            learning_rate=0.6,
            train_set_percentage=1.0,
            epochs=3):
        model = Seq(
            WxBiasLinear(784, 10, initialize_W='random', initialize_b='random')
            # Dropout(0.5)
        )

        train_set_sliced = slice_percentage(self.train_set,
                                            train_set_percentage)

        trainer = MinibatchTrainer()
        trainer.train_minibatches(
            model,
            train_set_sliced,
            batch_size=batch_size,
            loss=CrossEntropyLoss(),
            epochs=epochs,
            optimizer=SGD(learning_rate=learning_rate),
            # optimizer=RMSProp(learning_rate=learning_rate, decay_rate=0.6),
            # optimizer=AdaGrad(learning_rate=learning_rate),
            show_progress=True)

        # self.show_mnist_grid(model, self.test_set)

        # trainer = PatienceTrainer()
        # trainer.train(model,
        #               train_set_sliced, self.valid_set, self.test_set,
        #               batch_size=batch_size,
        #               loss=CrossEntropyLoss(),
        #               max_epochs=100,
        #               # optimizer=MomentumSGD(learning_rate=learning_rate, momentum=0.5),
        #               optimizer=RMSProp(learning_rate=learning_rate, decay_rate=0.9),
        #               # optimizer=AdaGrad(learning_rate=learning_rate),
        #               test_score_function=self.test_score_fun
        # )

        test_score = CrossEntropyLoss().test_score(model, self.test_set)

        return {
            # 'train_score': train_score,
            'test_score': test_score,
        }
Example No. 16
    def test_MulLayer(self):
        # 2 * 3 * 4
        model = Seq(Par(Const(2.), Const(3.), Const(4.)), Mul)
        y = model.forward(np.array([1.]))
        assert_array_equal(y, 24.)
Example No. 17
    def test_init_and_forward_SumLayer(self):
        # 1 + 2 + 3
        model = Seq(Par(Const(1.), Const(2.), Const(3.)), Sum)
        y = model.forward(np.zeros(3))
        assert_array_equal(y, 6.)
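Together with the `Mul` test above, these suggest the combinator semantics: `Par` fans the input out to each child layer and collects the outputs, `Sum` and `Mul` reduce them, and `Const` ignores its input, so the forward passes evaluate 2 * 3 * 4 = 24 and 1 + 2 + 3 = 6 regardless of what is fed in.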
Example No. 18
    def __init__(self, in_size, out_size, initialize_W, initialize_b):
        self.Wx = Wx(in_size, out_size, initialize_W)
        self.bias = PlusBias(out_size, initialize_b)
        self.model = Seq(self.Wx, self.bias)
Example No. 19
    def test_short_syntax(self):
        model = Seq(Linear(2, 1, initialize='ones'), Sigmoid)
        data = np.array([2., 3.])
        out = model.forward(data)
        self.assertEqual(round(out, 2), 1.)
Example No. 20
    def test_Linear(self):
        model = Seq()
        model.add(Linear(2, 1, initialize='ones'))
        data = np.array([2., 2.])
        y = model.forward(data)
        self.assertEqual(y, np.array([5]))
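The expected value of 5 is consistent with the augmented-weights convention: a `Linear(2, 1)` initialized to ones computes y = 1·2 + 1·2 + 1 (bias) = 5. The same convention explains the sigmoid tests above, where sigmoid(2 + 3 + 1) = sigmoid(6) ≈ 1.0 after rounding to two decimals.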