Example #1
 def test_as_array_is_view(self):
     b = Matrix(5, 4, 3)
     a = b.as_array()
     a[:] = 23.
     self.assertEqual(b[0], 23.)
     self.assertEqual(b[5], 23.)
     self.assertEqual(b[12], 23.)
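
The point of the check above is that as_array() exposes the Matrix storage as a view rather than a copy. A minimal standalone sketch of the same view semantics, using plain NumPy in place of Matrix (the flat-buffer layout is an assumption suggested by the flat indices b[0], b[5], b[12]):

    import numpy as np

    # Illustrative analogy only: a flat buffer plus a reshaped view of it.
    buf = np.zeros(5 * 4 * 3)              # flat storage
    view = buf.reshape(5, 4, 3)            # reshape returns a view, not a copy
    view[:] = 23.                          # writing through the view ...
    assert buf[0] == buf[5] == buf[12] == 23.   # ... changes the underlying buffer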
Example #2
    def test_gradient_finite_differences(self):
        check_errors = []
        for l, a in itertools.product(self.layer_types,
                                      self.activation_functions):
            net = self.build_network(l, a)
            e, grad_calc, grad_approx = check_gradient(net,
                                                       n_batches=5,
                                                       n_timesteps=7,
                                                       rnd=rnd)
            check_errors.append(e)
            if e > 1e-4:
                # construct a weight view and break down the differences
                layer = list(net.layers.values())[1]  # the only layer
                b = Matrix(grad_approx)
                print('$$$$$$$$$$$$ approx $$$$$$$$$$$')
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    print(q)
                print('$$$$$$$$$$$$ calc $$$$$$$$$$$')
                b = Matrix(grad_calc)
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    print(q)
                print('$$$$$$$$$$$$ diff $$$$$$$$$$$')
                b = Matrix(grad_approx - grad_calc)
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    print(q)
                print('\n')

            print("Checking Gradient of %s with %s = %0.4f" % (l(3), a, e))
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
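
check_gradient itself is not part of these snippets; it compares the backpropagated gradient against a finite-difference approximation. A minimal sketch of that idea on a toy loss with a known analytic gradient (finite_difference_gradient and toy_loss below are illustrative names, not the library's API):

    import numpy as np

    def finite_difference_gradient(f, w, eps=1e-6):
        """Central-difference approximation of df/dw for a scalar-valued f."""
        grad = np.zeros_like(w)
        for i in range(w.size):
            w_plus, w_minus = w.copy(), w.copy()
            w_plus.flat[i] += eps
            w_minus.flat[i] -= eps
            grad.flat[i] = (f(w_plus) - f(w_minus)) / (2 * eps)
        return grad

    def toy_loss(w):
        return 0.5 * np.sum(w ** 2)           # analytic gradient is w itself

    w = np.random.randn(4, 3)
    approx = finite_difference_gradient(toy_loss, w)
    assert np.max(np.abs(approx - w)) < 1e-4  # approximation agrees with the analytic gradient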
Example #3
 def test_construction_with_shape(self):
     b = Matrix(2, 3, 4)
     self.assertEqual(len(b), 2 * 3 * 4)
     self.assertEqual(b.get_feature_size(), 4)
     self.assertEqual(b.get_batch_size(), 3)
     self.assertEqual(b.get_time_size(), 2)
     self.assertEqual(b.shape(), (2, 3, 4))
Example #4
    def test_gradient_finite_differences(self):
        e, grad_calc, grad_approx = check_gradient(self.net,
                                                   n_batches=self.batch_size,
                                                   n_timesteps=self.timesteps,
                                                   rnd=rnd)
        # construct a weight view and break down the differences
        layer = list(self.net.layers.values())[1]  # the only layer
        a = Matrix(grad_approx)
        b = Matrix(grad_approx - grad_calc)
        c = Matrix(grad_calc)

        diff = layer.create_param_view(b)
        approx = layer.create_param_view(a)
        calc = layer.create_param_view(c)
        E = 0.0

        for n, q in diff.items():
            if n == 'Timing':
                continue
            print("====== %s ======" % n)
            print("Calculated:")
            print(calc[n])
            print("Approx:")
            print(approx[n])
            print("Difference:")
            print(q)

            err = np.sum(q**2) / self.batch_size
            print(err)
            E += err

        print("Checking Gradient of ClockworkLayer with sigmoid = %0.4f" % E)
        self.assertTrue(E < 1e-6)
Example #5
 def test_memory_management1(self):
     b = Matrix(10)
     a = b.as_array()
     del b
     gc.collect()
     a[1] = 1
     del a
     gc.collect()
Example #6
 def test_slicing_3dview_has_correct_shapes(self):
     b = Matrix(11, 9, 7)
     self.assertEqual(b[1:3].shape(), (2, 9, 7))
     self.assertEqual(b[3:11].shape(), (8, 9, 7))
     self.assertEqual(b[:3].shape(), (3, 9, 7))
     self.assertEqual(b[5:].shape(), (6, 9, 7))
     self.assertEqual(b[:].shape(), (11, 9, 7))
Example #7
    def test_gradient_forked_architecture(self):
        check_errors = []
        in_layer = InputLayer(self.input_size)
        out_layer = ForwardLayer(self.output_size)

        in_layer >> ForwardLayer(3, name='A') >> out_layer
        in_layer >> ForwardLayer(2, name='B') >> out_layer

        net = build_net(out_layer)
        net.initialize(Gaussian(0.1))

        e, grad_calc, grad_approx = check_gradient(net,
                                                   n_batches=5,
                                                   n_timesteps=7,
                                                   rnd=rnd)
        check_errors.append(e)
        if e > 1e-4:
            # construct a weight view and break down the differences
            layer = list(net.layers.values())[1]  # inspect one of the hidden layers
            b = Matrix(grad_approx - grad_calc)
            diff = layer.create_param_view(b)
            for n, q in diff.items():
                print("====== %s ======" % n)
                print(q)

        print("Checking Gradient of forked architecture = %0.4f" % e)
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
Example #8
 def test_lstm_forward_pass_insensitive_to_fwd_state(self):
     net = self.build_network(LstmLayer, "tanh")
     out1 = net.forward_pass(self.X).copy()
     net.fwd_state_manager.initialize_buffer(
         Matrix(rnd.randn(net.fwd_state_manager.calculate_size())))
     out2 = net.forward_pass(self.X).copy()
     self.assertTrue(np.allclose(out1, out2))
Example #9
    def test_lstm_backward_pass_insensitive_to_bwd_state(self):
        net = self.build_network(LstmLayer, "tanh")
        net.clear_internal_state()
        out1 = net.forward_pass(self.X).copy()
        targets = create_targets_object(np.zeros_like(out1))
        deltas1 = net.backward_pass(targets).copy()
        bwstate1 = net.get_bwd_state_for('LstmLayer')
        b1 = {}
        for h in bwstate1.keys():
            b1[h] = bwstate1[h].copy()

        net.bwd_state_manager.initialize_buffer(
            Matrix(rnd.randn(net.bwd_state_manager.calculate_size())))
        net.forward_pass(self.X).copy()
        deltas2 = net.backward_pass(targets).copy()
        bwstate2 = net.get_bwd_state_for('LstmLayer')
        b2 = {}
        for h in bwstate2.keys():
            b2[h] = bwstate2[h].copy()

        for b in b2:
            print(b)
            print(b1[b] - b2[b])

        self.assertTrue(np.allclose(deltas1, deltas2))
Example #10
 def test_item_access(self):
     b = Matrix(3)
     b[0] = 5
     b[1] = 7
     b[2] = 10
     self.assertEqual(b[0], 5)
     self.assertEqual(b[1], 7)
     self.assertEqual(b[2], 10)
Example #11
 def test_slicing_view_has_correct_values(self):
     b = Matrix(list(range(6)))
     v = b[1:3]
     self.assertEqual(v[0], 1)
     self.assertEqual(v[1], 2)
     v = b[4:6]
     self.assertEqual(v[0], 4)
     self.assertEqual(v[1], 5)
Example #12
 def test_memory_management2(self):
     a = np.array([[[1, 2, 3]]])
     b = Matrix(a)
     del a
     gc.collect()
     c = b[1]
     del c
     del b
     gc.collect()
Example #13
 def test_slicing_gives_a_view(self):
     b = Matrix(10)
     v = b[1:2]
     self.assertEqual(len(v), 1)
     v = b[0:2]
     self.assertEqual(len(v), 2)
     v = b[0:10]
     self.assertEqual(len(v), 10)
     v = b[4:7]
     self.assertEqual(len(v), 3)
Example #14
    def test_gradient_finite_differences(self):
        check_errors = []
        for cfg in self.lstm_configs:
            net = self.build_network(cfg)
            e, grad_calc, grad_approx = check_gradient(net, n_batches=10,
                                                       n_timesteps=10, rnd=rnd)
            check_errors.append(e)
            if e > 1e-4:
                # construct a weight view and break down the differences
                layer = list(net.layers.values())[1]  # the only layer
                b = Matrix(grad_approx - grad_calc)
                a = Matrix(grad_approx)
                c = Matrix(grad_calc)
                # appr = layer.create_param_view(a)
                # calc = layer.create_param_view(c)
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    # print(appr[n])
                    # print(calc[n])
                    print(q)

            print("Checking Gradient of Lstm97 with %s = %0.4f" % (cfg, e))
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
Example #15
    def test_rforward_finite_differences(self):
        check_errors = []
        for cfg in self.lstm_configs:
            net = self.build_network(cfg)
            e, allerrors = check_rpass(net, n_batches=5, n_timesteps=2, rnd=rnd)
            check_errors.append(e)
            if e > 1e-4:
                # construct a weight view and break down the differences
                layer = list(net.layers.values())[1]  # the only layer
                b = Matrix(allerrors.copy())
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    print(q)

            print("Checking RForward pass of Lstm97 with %s = %0.4g" % (cfg, e))
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
Example #16
    def test_lwta_gradient_finite_differences(self):
        check_errors = []
        for a in self.activation_functions:
            net = self.build_lwta_network(8, a)
            e, grad_calc, grad_approx = check_gradient(net,
                                                       n_batches=5,
                                                       n_timesteps=7,
                                                       rnd=rnd)
            check_errors.append(e)
            if e > 1e-4:
                # construct a weight view and break down the differences
                layer = list(net.layers.values())[1]  # the only layer
                b = Matrix(grad_approx - grad_calc)
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    print(q)

            print("Checking Gradient of %s with LWTA = %0.4f" % (a, e))
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
Example #17
    def test_staticlstm_gradient_finite_differences(self):
        t = 7
        b = 5
        check_errors = []
        net = self.build_staticlstm_network(3, 'sigmoid')
        e, grad_calc, grad_approx = check_gradient(net,
                                                   n_batches=b,
                                                   n_timesteps=t,
                                                   rnd=rnd)
        check_errors.append(e)
        if e > 1e-4:
            # construct a weight view and break down the differences
            layer = list(net.layers.values())[1]  # the only layer
            diff_buffer = Matrix(grad_approx - grad_calc)  # avoid shadowing the batch size `b`
            diff = layer.create_param_view(diff_buffer)
            for n, q in diff.items():
                print("====== %s ======" % n)
                print(q)

        # print("Checking Gradient of %s with %s = %0.4f" % (l(3), a, e))
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
Example #18
    def test_rforward_finite_differences_multilayer(self):
        check_errors = []
        for l, a in itertools.product(self.layer_types,
                                      self.activation_functions):
            net = self.build_network(l, a, layers=2)
            e, allerrors = check_rpass(net,
                                       n_batches=5,
                                       n_timesteps=7,
                                       rnd=rnd)
            check_errors.append(e)
            if e > 1e-4:
                # construct a weight view and break down the differences
                layer = list(net.layers.values())[1]  # the only layer
                b = Matrix(allerrors.copy())
                diff = layer.create_param_view(b)
                for n, q in diff.items():
                    print("====== %s ======" % n)
                    print(q)

            print("Checking RForward pass of %s with %s = %0.4g" %
                  (l(3), a, e))
        self.assertTrue(np.all(np.array(check_errors) < 1e-4))
Example #19
 def test_reshape_view_with_negative_values_raises_if_indivisible(self):
     b = Matrix(12, 2, 1)
     self.assertRaises(AssertionError, b.reshape, -1, 7, 2)
Example #20
 def test_reshape_view_has_correct_shape(self):
     b = Matrix(12, 2, 1)
     self.assertEqual(b.reshape(4, 3, 2).shape(), (4, 3, 2))
     self.assertEqual(b.reshape(1, 1, 24).shape(), (1, 1, 24))
     self.assertEqual(b.reshape(1, 6, 4).shape(), (1, 6, 4))
Example #21
 def test_reshape_view_with_negative_value_has_correct_shape(self):
     b = Matrix(12, 2, 1)
     self.assertEqual(b.reshape(-1, 3, 2).shape(), (4, 3, 2))
     self.assertEqual(b.reshape(1, 1, -1).shape(), (1, 1, 24))
     self.assertEqual(b.reshape(1, -1, 4).shape(), (1, 6, 4))
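
As the shapes above imply, a single -1 entry is resolved from the total number of elements, mirroring the NumPy reshape convention (that Matrix.reshape follows exactly this rule is an assumption drawn from the tested shapes):

    import numpy as np

    # Illustrative analogy using NumPy's -1 inference on a 12 * 2 * 1 = 24-element array.
    a = np.zeros((12, 2, 1))
    assert a.reshape(-1, 3, 2).shape == (4, 3, 2)    # -1 resolved to 24 / (3 * 2) = 4
    assert a.reshape(1, 1, -1).shape == (1, 1, 24)
    assert a.reshape(1, -1, 4).shape == (1, 6, 4)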
Example #22
 def test_as_array(self):
     b = Matrix(5, 4, 3)
     a = b.as_array()
     self.assertEqual(a.shape, (5, 4, 3))
     self.assertTrue(np.all(a == 0.))
Example #23
 def test_construction_with_size(self):
     for i in range(1, 11):
         b = Matrix(i)
         self.assertEqual(len(b), i)
         for j in range(i):
             self.assertEqual(b[j], 0.0)
Example #24
 def test_construction_with_3d_nparray(self):
     a = np.array([[[1, 2, 3], [4, 4, 4]], [[1, 2, 3], [4, 4, 4]]])
     b = Matrix(a)
     self.assertEqual(len(b), len(a.flatten()))
     for i in range(len(b)):
         self.assertEqual(b[i], a.flatten()[i])
Example #25
 def test_construction_with_1d_nparray(self):
     a = np.array([1, 2, 3, 3, 2, 3])
     b = Matrix(a)
     self.assertEqual(len(b), len(a))
     for i in range(len(a)):
         self.assertEqual(b[i], a[i])
Example #26
 def test_reshape_view_with_multiple_negative_values_raises(self):
     b = Matrix(12, 2, 1)
     self.assertRaises(AssertionError, b.reshape, -1, -1, 1)
     self.assertRaises(AssertionError, b.reshape, -1, 4, -1)
     self.assertRaises(AssertionError, b.reshape, 6, -1, -1)