Example 1
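This test verifies that a single Linear layer is equivalent to the composition of a Wx layer (matrix product) and a PlusBias layer chained with Seq: same forward output, same input gradient, and, after one SGD step, the same weights. The snippets in this listing are test methods, so they assume a surrounding unittest.TestCase with numpy imported as np, assert_array_equal from numpy.testing, and the library's classes in scope.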
    def test_compare_with_Linear(self):
        in_size = 2
        out_size = 3
        x = np.random.rand(in_size)
        optimizer = SGD(0.1)

        # Build the same affine map two ways: a single Linear layer versus
        # Wx (matrix product) followed by PlusBias, chained with Seq.
        linear = Linear(in_size, out_size, initialize='zeros')

        wx = Wx(in_size, out_size, initialize='zeros')
        plusbias = PlusBias(out_size, initialize='zeros')
        wxbias = Seq(wx, plusbias)

        linear_y = linear.forward(x)
        wxbias_y = wxbias.forward(x)
        assert_array_equal(linear_y, wxbias_y)

        dJdy = np.random.rand(out_size)
        linear_grad = linear.backward(dJdy)
        wxbias_grad = wxbias.backward(dJdy)
        assert_array_equal(linear_grad, wxbias_grad)

        linear.update_weights(optimizer)
        wxbias.update_weights(optimizer)

        # Linear evidently stores the bias as the first column of its weight
        # matrix: rebuild that layout from PlusBias's b and Wx's W.
        stack = np.vstack([plusbias.b.get(), wx.W.get().T]).T
        assert_array_equal(linear.W, stack)
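The final assertion implies that Linear keeps its bias as the first column of its weight matrix and implicitly extends the input with a leading 1. A minimal NumPy sketch of that equivalence, independent of the library (all names below are illustrative):

import numpy as np
from numpy.testing import assert_allclose

rng = np.random.default_rng(0)
in_size, out_size = 2, 3

W = rng.random((out_size, in_size))  # weights of a Wx-style layer
b = rng.random(out_size)             # bias of a PlusBias-style layer
x = rng.random(in_size)

# Wx followed by PlusBias: y = W @ x + b
y_composed = W @ x + b

# One Linear layer with the bias stored as the first weight column,
# acting on the input extended with a leading 1 (the layout the
# vstack([...]).T assertion above reconstructs).
W_linear = np.hstack([b[:, None], W])
x_ext = np.concatenate(([1.0], x))
y_linear = W_linear @ x_ext

assert_allclose(y_linear, y_composed)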
Example 2
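This test checks that the layer-based API and the Var/expression ("syntax") API build the same computation: equal forward outputs, equal input gradients, equal weight gradients, and identical weights after one SGD step.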
    def test_update_weights_layer_vs_syntax(self):
        x = np.array([1., 2., 3.])
        optimizer = SGD(0.1)

        W = np.random.rand(3, 3 + 1)  # the extra column holds the bias

        linear_layer = layers.Linear(3, 3, initialize=W.copy())
        linear_layer_model = Seq(linear_layer, layers.Tanh)
        y = linear_layer_model.forward(x)
        back = linear_layer_model.backward(np.ones(3))

        # The same network expressed with the Var/expression ("syntax") API.
        var_x = Var('x')
        syntax_linear = Linear(3, 3, initialize=W.copy(), input=var_x)
        syntax_model = Tanh(syntax_linear)
        syntax_y = syntax_model.forward_variables({'x': x})
        syntax_back = syntax_model.backward_variables(np.ones(3))

        # Both APIs must have accumulated the same weight gradient.
        assert_array_equal(linear_layer.delta_W, syntax_linear.layer.delta_W)

        # update weights in both models
        linear_layer_model.update_weights(optimizer)
        syntax_model.update_weights(optimizer)

        assert_array_equal(y, syntax_y)
        assert_array_equal(back, syntax_back['x'])
        assert_array_equal(linear_layer.W, syntax_linear.layer.W)
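The delta_W assertion guarantees that both models enter the update step with the same gradient, so a single SGD(0.1) step must leave their weights equal. A sketch of that argument in plain NumPy, assuming the usual W <- W - lr * dJ/dW rule (the function name below is illustrative, not the library's API):

import numpy as np

learning_rate = 0.1

def sgd_update(W, delta_W):
    # Plain SGD step, assumed to be what SGD(0.1) applies.
    return W - learning_rate * delta_W

rng = np.random.default_rng(1)
W = rng.random((3, 3 + 1))        # 3 outputs, 3 inputs + 1 bias column
delta_W = rng.random((3, 3 + 1))  # gradient produced by backward

# Identical starting weights, identical gradients, identical rule:
# both models necessarily end up with identical weights.
assert np.array_equal(sgd_update(W.copy(), delta_W),
                      sgd_update(W.copy(), delta_W))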
Example 3
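This adapter exposes the Wx + PlusBias pair behind the standard Layer interface, so the composed pair can be dropped in wherever a Linear layer is expected.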
class WxBiasLinear(Layer):
    """Linear implemented as Wx followed by PlusBias, behind the Layer interface."""

    def __init__(self, in_size, out_size, initialize_W, initialize_b):
        self.Wx = Wx(in_size, out_size, initialize_W)
        self.bias = PlusBias(out_size, initialize_b)
        self.model = Seq(self.Wx, self.bias)

    # The three Layer operations simply delegate to the inner Seq model.
    def forward(self, x, is_training=False):
        return self.model.forward(x, is_training)

    def backward(self, dJdy):
        return self.model.backward(dJdy)

    def update_weights(self, optimizer):
        return self.model.update_weights(optimizer)
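For reference, here is a self-contained sketch of the composition pattern this adapter relies on. The *Sketch classes below are illustrative stand-ins for the library's Wx, PlusBias, and Seq, not its actual implementation:

import numpy as np

class WxSketch:
    # y = W @ x; backward returns W.T @ dJdy and stores the weight gradient.
    def __init__(self, in_size, out_size):
        self.W = np.zeros((out_size, in_size))
    def forward(self, x, is_training=False):
        self.x = x
        return self.W @ x
    def backward(self, dJdy):
        self.delta_W = np.outer(dJdy, self.x)
        return self.W.T @ dJdy

class PlusBiasSketch:
    # y = x + b; backward passes dJdy through and stores the bias gradient.
    def __init__(self, out_size):
        self.b = np.zeros(out_size)
    def forward(self, x, is_training=False):
        return x + self.b
    def backward(self, dJdy):
        self.delta_b = dJdy
        return dJdy

class SeqSketch:
    # Chain layers: forward left-to-right, backward right-to-left.
    def __init__(self, *layers):
        self.layers = layers
    def forward(self, x, is_training=False):
        for layer in self.layers:
            x = layer.forward(x, is_training)
        return x
    def backward(self, dJdy):
        for layer in reversed(self.layers):
            dJdy = layer.backward(dJdy)
        return dJdy

model = SeqSketch(WxSketch(2, 3), PlusBiasSketch(3))
y = model.forward(np.random.rand(2))   # shape (3,)
dJdx = model.backward(np.ones(3))      # shape (2,)

Wrapping the pair behind a single Layer, as WxBiasLinear does above, keeps call sites unaware of whether they are talking to one layer or to a Seq of two.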