Example #1
    def primitiv_test2(self):
        dev = D.Naive()
        Device.set_default(dev)
        g = Graph()
        Graph.set_default(g)

        x = F.input(np.array([[1], [2]]))
        a = F.input(np.array([[1, 2], [1, 2]]))
        y = F.matmul(a, x)
        return y.to_list()
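The snippets on this page leave their imports implicit. As a minimal, self-contained sketch, Example #1 could be run like this, assuming primitiv's usual Python module layout (the aliases D, F, I, and O below are the conventions the remaining examples also rely on):

import numpy as np
from primitiv import Device, Graph, Parameter, Shape
from primitiv import devices as D
from primitiv import functions as F
from primitiv import initializers as I
from primitiv import optimizers as O

dev = D.Naive()          # CPU reference device
Device.set_default(dev)  # picked up implicitly by F.input / F.parameter
g = Graph()
Graph.set_default(g)     # nodes below are registered on this graph

x = F.input(np.array([[1], [2]]))        # 2x1 column vector
a = F.input(np.array([[1, 2], [1, 2]]))  # 2x2 matrix
y = F.matmul(a, x)                       # each row: 1*1 + 2*2 = 5
print(y.to_list())                       # -> [5.0, 5.0]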
Example #2
    def test_node_pow(self):
        x = F.input(self.a)
        y = F.input(self.b)
        self.assertTrue(np.isclose((x ** y).to_ndarrays()[0], np.array([[1, 2], [81, 65536]])).all())
        self.assertTrue(np.isclose((x ** 2).to_ndarrays()[0], np.array([[1, 4], [9, 16]])).all())
        self.assertTrue(np.isclose((2 ** x).to_ndarrays()[0], np.array([[2, 4], [8, 16]])).all())
        self.assertTrue(np.isclose((x ** -2).to_ndarrays()[0], np.array([[1, 1/4], [1/9, 1/16]])).all())
        input_arr = np.array([1, -1, 3, -3, 5, -5])
        x = F.input(input_arr)
        self.assertTrue(((x ** 6).to_ndarrays()[0] == np.array([1, 1, 729, 729, 15625, 15625])).all())
        self.assertTrue(((x ** 9).to_ndarrays()[0] == np.array([1, -1, 19683, -19683, 1953125, -1953125])).all())
        input_arr = np.array([1, -1])
        x = F.input(input_arr)
        self.assertTrue(((x ** 0x7fffffff).to_ndarrays()[0] == np.array([1, -1])).all())
        self.assertTrue(((x ** -0x80000000).to_ndarrays()[0] == np.array([1, 1])).all())

        self.assertRaises(TypeError, lambda: pow(x, y, 2))
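This test and the arithmetic tests below all read self.a and self.b from an unshown fixture. The assertions pin the values down; a setUp consistent with every expected matrix on this page would be (inferred from the assertions, not taken from the original source):

    def setUp(self):
        # Inferred values: e.g. x ** y == [[1**1, 2**1], [3**4, 4**8]]
        # == [[1, 2], [81, 65536]], matching the first assertion above.
        self.a = np.array([[1, 2], [3, 4]], dtype=np.float32)
        self.b = np.array([[1, 1], [4, 8]], dtype=np.float32)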
Example #3
    def test_functions_input_argument(self):
        # list[ndarray] w/o shape
        x = F.input(self.ndarray_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))

        # ndarray w/o shape
        x = F.input(self.ndarray_data[0])
        self.assertEqual(x.to_list(), self.list_data[:12])
        self.assertEqual(x.shape(), Shape([4, 3], 1))

        # list[float] w/o shape
        self.assertRaises(TypeError, lambda: F.input(self.list_data))

        # list[float] w/ shape
        x = F.raw_input(Shape([4, 3], 2), self.list_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))
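The fixtures here are implicit as well. Shape([4, 3], 2) describes a 4x3 matrix with a minibatch of 2, so self.list_data must hold 4 * 3 * 2 = 24 floats. A hypothetical fixture consistent with the assertions, assuming each sample is flattened in column-major order (an assumption about the binding's memory layout, not something the snippet states):

    def setUp(self):
        self.ndarray_data = [np.random.rand(4, 3), np.random.rand(4, 3)]
        # One flat list of 24 floats, sample by sample; order="F" mirrors
        # the assumed column-major flattening of to_list().
        self.list_data = [float(v) for m in self.ndarray_data
                          for v in m.flatten(order="F")]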
Example #4
    def primitiv_test1(self):
        dev = D.Naive()
        Device.set_default(dev)
        g = Graph()
        Graph.set_default(g)

        x = F.input(np.array([[1], [2], [3]]))
        y = 2 * x + 3
        return y.to_list()
Example #5
    def make_graph(inputs, train):
        x = F.input(inputs)

        w1 = F.parameter(pw1)
        b1 = F.parameter(pb1)
        h = F.relu(w1 @ x + b1)

        h = F.dropout(h, .5, train)

        w2 = F.parameter(pw2)
        b2 = F.parameter(pb2)
        return w2 @ h + b2
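make_graph closes over pw1, pb1, pw2, and pb2 from its enclosing scope; Example #13 below shows the full pattern. A hypothetical enclosing scope that makes this snippet runnable (layer sizes are assumptions mirroring Example #13):

pw1 = Parameter([8, 2], I.XavierUniform())
pb1 = Parameter([8], I.Constant(0))
pw2 = Parameter([1, 8], I.XavierUniform())
pb2 = Parameter([], I.Constant(0))
y = make_graph(input_data, train=True)  # train=True keeps dropout active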
Example #6
 def make_graph(inputs):
     # We first store input values explicitly on GPU 0.
     x = F.input(inputs, device=dev0)
     w1 = F.parameter(pw1)
     b1 = F.parameter(pb1)
     w2 = F.parameter(pw2)
     b2 = F.parameter(pb2)
     # The hidden layer is calculated and implicitly stored on GPU 0.
     h_on_gpu0 = F.relu(w1 @ x + b1)
     # `copy()` transfers the hidden layer to GPU 1.
     h_on_gpu1 = F.copy(h_on_gpu0, dev1)
     # The output layer is calculated and implicitly stored on GPU 1.
     return w2 @ h_on_gpu1 + b2
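dev0 and dev1 are free variables in this snippet. Assuming two CUDA devices (the device IDs are placeholders):

dev0 = D.CUDA(0)  # holds the input and the hidden layer
dev1 = D.CUDA(1)  # holds the copied hidden layer and the output
Device.set_default(dev0)  # so the parameters land on GPU 0, as the comments assume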
Example #7
 def make_graph(inputs, train):
     # Input and parameters.
     #x = F.input(Shape([IMAGE_HEIGHT, IMAGE_WIDTH], BATCH_SIZE), inputs)
     x = F.input(inputs)
     w_cnn1 = F.parameter(pw_cnn1)
     w_cnn2 = F.parameter(pw_cnn2)
     w_fc1 = F.parameter(pw_fc1)
     w_fc2 = F.parameter(pw_fc2)
     b_fc1 = F.parameter(pb_fc1)
     b_fc2 = F.parameter(pb_fc2)
     # CNNs
     h_cnn1 = F.relu(F.conv2d(x, w_cnn1, PADDING1, PADDING1, 1, 1, 1, 1))
     h_pool1 = F.max_pool2d(h_cnn1, 2, 2, 0, 0, 2, 2)
     h_cnn2 = F.relu(
         F.conv2d(h_pool1, w_cnn2, PADDING2, PADDING2, 1, 1, 1, 1))
     h_pool2 = F.max_pool2d(h_cnn2, 2, 2, 0, 0, 2, 2)
     # FC layers
     x_fc = F.dropout(F.flatten(h_pool2), .5, train)
     h_fc = F.dropout(F.relu(F.matmul(w_fc1, x_fc) + b_fc1), .5, train)
     return F.matmul(w_fc2, h_fc) + b_fc2
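This snippet also depends on constants from its enclosing scope. Reading the calls above, the positional arguments to F.conv2d are the paddings, strides, and dilations, and those to F.max_pool2d are the window sizes, paddings, and strides. Hypothetical values for the free names (MNIST-like sizes, purely an assumption):

IMAGE_HEIGHT = IMAGE_WIDTH = 28  # assumed 28x28 input images
BATCH_SIZE = 200                 # assumed minibatch size
PADDING1 = PADDING2 = 2          # assumed padding for both conv layers
y = make_graph(inputs, train=True)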
Example #8
 def test_node_mul(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((x * y).to_ndarrays()[0] == np.array([[1, 2], [12, 32]])).all())
     self.assertTrue(((x * 2).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
     self.assertTrue(((2 * x).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
Example #9
    def primitiv_xor_test(self):
        dev = D.Naive()
        Device.set_default(dev)
        g = Graph()
        Graph.set_default(g)

        input_data = [
            np.array([[1], [1]]),
            np.array([[-1], [1]]),
            np.array([[-1], [-1]]),
            np.array([[1], [-1]]),
        ]

        label_data = [
            np.array([1]),
            np.array([-1]),
            np.array([1]),
            np.array([-1]),
        ]

        N = 8
        pw = Parameter([1, N], I.XavierUniform())
        pb = Parameter([], I.Constant(0))
        pu = Parameter([N, 2], I.XavierUniform())
        pc = Parameter([N], I.Constant(0))
        if os.path.isfile('output/xor/pw.data') and os.path.isfile(
                'output/xor/pb.data') and os.path.isfile(
                    'output/xor/pu.data') and os.path.isfile(
                        'output/xor/pc.data'):
            pw.load('output/xor/pw.data')
            pb.load('output/xor/pb.data')
            pu.load('output/xor/pu.data')
            pc.load('output/xor/pc.data')

        optimizer = O.SGD(0.01)
        optimizer.add(pw, pb, pu, pc)

        for epoch in range(1000):
            print(epoch, end=' ')

            g.clear()

            x = F.input(input_data)
            w = F.parameter(pw)
            b = F.parameter(pb)
            u = F.parameter(pu)
            c = F.parameter(pc)
            h = F.tanh(u @ x + c)
            y = F.tanh(w @ h + b)

            for val in y.to_list():
                print('{:+.6f},'.format(val), end=' ')

            loss = self.calc_loss(y, label_data)
            print('loss={:.6f}'.format(loss.to_float()))

            optimizer.reset_gradients()
            loss.backward()
            optimizer.update()

        pw.save('output/xor/pw.data')
        pb.save('output/xor/pb.data')
        pu.save('output/xor/pu.data')
        pc.save('output/xor/pc.data')

        return y.to_list()
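One practical caveat: pw.save('output/xor/pw.data') raises an error if the directory does not exist. Since os is already imported for the isfile checks, a one-line guard before the training loop (not in the original) avoids that:

        os.makedirs('output/xor', exist_ok=True)  # ensure save() has a target directory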
Example #10
 def calc_loss(self, y, label_data):
     t = F.input(label_data)
     diff = y - t
     return F.batch.mean(diff * diff)
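calc_loss is the mean squared error over the minibatch: each sample contributes its squared difference, and F.batch.mean averages those per-sample values into a single node. A quick sanity check under the Example #9 setup (label_data assumed in scope): the loss of the labels against themselves is exactly zero.

t = F.input(label_data)
assert F.batch.mean((t - t) * (t - t)).to_float() == 0.0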
Example #11
 def test_node_truediv(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((x / y).to_ndarrays()[0] == np.array([[1, 2], [0.75, 0.5]])).all())
     self.assertTrue(((x / 2).to_ndarrays()[0] == np.array([[0.5, 1], [1.5, 2]])).all())
     self.assertTrue(((2 / y).to_ndarrays()[0] == np.array([[2, 2], [0.5, 0.25]])).all())
Example #12
 def test_node_matmul(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((x @ y).to_ndarrays()[0] == np.array([[9, 17], [19, 35]])).all())
     self.assertRaises(TypeError, lambda: x @ 2)
     self.assertRaises(TypeError, lambda: 2 @ x)
Example #13
def main():
    dev = D.Naive()  # or D.CUDA(gpuid)
    Device.set_default(dev)

    # Parameters
    pw1 = Parameter([8, 2], I.XavierUniform())
    pb1 = Parameter([8], I.Constant(0))
    pw2 = Parameter([1, 8], I.XavierUniform())
    pb2 = Parameter([], I.Constant(0))

    # Optimizer
    optimizer = O.SGD(0.1)

    # Registers parameters.
    optimizer.add(pw1, pb1, pw2, pb2)

    # Training data
    input_data = [
        np.array([1, 1], dtype=np.float32),  # Sample 1
        np.array([1, -1], dtype=np.float32),  # Sample 2
        np.array([-1, 1], dtype=np.float32),  # Sample 3
        np.array([-1, -1], dtype=np.float32),  # Sample 4
    ]
    output_data = [
        np.array([1], dtype=np.float32),  # Label 1
        np.array([-1], dtype=np.float32),  # Label 2
        np.array([-1], dtype=np.float32),  # Label 3
        np.array([1], dtype=np.float32),  # Label 4
    ]

    g = Graph()
    Graph.set_default(g)

    for i in range(10):
        g.clear()

        # Builds a computation graph.
        x = F.input(input_data)
        w1 = F.parameter(pw1)
        b1 = F.parameter(pb1)
        w2 = F.parameter(pw2)
        b2 = F.parameter(pb2)
        h = F.tanh(w1 @ x + b1)
        y = w2 @ h + b2

        # Obtains values.
        y_val = y.to_list()
        print("epoch ", i, ":")
        for j in range(4):
            print("  [", j, "]: ", y_val[j])

        # Extends the computation graph to calculate loss values.
        t = F.input(output_data)
        diff = t - y
        loss = F.batch.mean(diff * diff)

        # Obtains the loss.
        loss_val = loss.to_float()
        print("  loss: ", loss_val)

        # Updates parameters.
        optimizer.reset_gradients()
        loss.backward()
        optimizer.update()
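To run Example #13 as a standalone script, the standard entry-point guard (not part of the original listing) is all that is missing:

if __name__ == "__main__":
    main()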
Example #14
 def test_node_sub(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((x - y).to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
     self.assertTrue(((x - 2).to_ndarrays()[0] == np.array([[-1, 0], [1, 2]])).all())
     self.assertTrue(((2 - x).to_ndarrays()[0] == np.array([[1, 0], [-1, -2]])).all())
Example #15
 def test_node_add(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((x + y).to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
     self.assertTrue(((x + 2).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
     self.assertTrue(((2 + x).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
Example #16
 def test_node_neg(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((-x).to_ndarrays()[0] == -self.a).all())
Example #17
 def test_node_pos(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((+x).to_ndarrays()[0] == self.a).all())
Example #18
 def test_input_ndarrays(self):
     x = F.input(self.input_data)
     self.assertEqual(x.to_list(), self.list_expected)
     self.assertTrue((x.to_ndarrays()[0] == self.input_data[0]).all())
     self.assertTrue((x.to_ndarrays()[1] == self.input_data[1]).all())
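As elsewhere, the fixtures are implied by the assertions: self.input_data is a list of equally-shaped ndarrays that F.input stacks into one minibatch, and to_ndarrays() hands them back per sample. A hypothetical setUp (the values and the flattening order behind list_expected are assumptions):

 def setUp(self):
     self.input_data = [
         np.array([[1, 2], [3, 4]], dtype=np.float32),
         np.array([[5, 6], [7, 8]], dtype=np.float32),
     ]
     # Flat values, sample by sample, in the order to_list() is assumed to use.
     self.list_expected = [float(v) for m in self.input_data
                           for v in m.flatten(order="F")]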