Example 1
    def test_device_instance(self):
        dev = Device.get_default()
        self.assertIs(dev, self.device)

        tensor = tF.input([0], Shape([]))
        dev = tensor.device()
        self.assertIs(dev, self.device)

        node = F.input([0], Shape([]))
        dev = node.device()
        self.assertIs(dev, self.device)

        my_device = Naive()
        self.assertIsNot(my_device, self.device)

        node = F.input([0], Shape([]), device=my_device)
        dev = node.device()
        self.assertIs(dev, my_device)

        dev = self.graph.get_device(node)
        self.assertIs(dev, my_device)

        param = Parameter(Shape([]))
        dev = param.device()
        self.assertIs(dev, self.device)
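The snippets in this collection are test methods and helper functions lifted from a larger suite, so they omit their module-level setup. They appear to rely on a scaffold roughly like the one below; the module paths and aliases are inferred from the names used in the code (F, tF, D, I, O, np) and are assumptions, not verbatim source:

    import random
    import tempfile

    import numpy as np

    from primitiv import Device, Graph, Model, Parameter, Shape, Tensor, Trainer
    from primitiv import devices as D            # assumed home of Naive, CUDA
    from primitiv import functions as F          # graph (Node) operations
    from primitiv import tensor_functions as tF  # eager Tensor operations
    from primitiv import initializers as I       # Constant, XavierUniform, ...
    from primitiv import optimizers as O         # SGD, ...

The test methods also use fixtures (self.device, self.graph, self.p, self.list_data, self.ndarray_data) and constants (NUM_*, KERNEL_SIZE*, BATCH_SIZE) that are defined elsewhere in the original files.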
Example 2
 def test_TensorTest_CheckInplaceSubtractNN(self):
     for dev in TensorTest.devices:
         a_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
         b_data = [0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9]
         y_data = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
         a = tF.raw_input(Shape([2, 2], 3), a_data, dev)
         b = tF.raw_input(Shape([2, 2], 3), b_data, dev)
         a -= b
         self.assertEqual(y_data, a.to_list())
Example 3
    def test_Parameter_argument(self):
        # no argument
        p = Parameter()
        self.assertFalse(p.valid())

        # shape w/ Initializer
        p = Parameter(Shape([4, 3]), I.Constant(1))
        self.assertEqual(p.shape(), Shape([4, 3]))
        self.assertEqual(p.value.to_list(), [1] * 12)
Example 4
 def test_TensorTest_CheckInplaceAddNN(self):
     for dev in TensorTest.devices:
         a_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
         b_data = [0, -1, -2, -3, -3, -4, -5, -6, -6, -7, -8, -9]
         y_data = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
         a = tF.raw_input(Shape([2, 2], 3), a_data, dev)
         b = tF.raw_input(Shape([2, 2], 3), b_data, dev)
         a += b
         self.assertEqual(y_data, a.to_list())
Example 5
    def test_Parameter_argument(self):
        # shape w/o data
        p = Parameter(Shape([2, 3]))
        self.assertEqual(p.shape(), Shape([2, 3]))

        # shape w/ Initializer
        p = Parameter(Shape([4, 3]), I.Constant(1))
        self.assertEqual(p.shape(), Shape([4, 3]))
        self.assertEqual(p.value.to_list(), [1] * 12)

        # shape w/ list[float]
        p = Parameter(Shape([4, 3]), self.list_data[:12])
        self.assertEqual(p.shape(), Shape([4, 3]))
        self.assertEqual(p.value.to_list(), self.list_data[:12])

        # ndarray w/o shape
        p = Parameter(init=self.ndarray_data[0])
        self.assertEqual(p.shape(), Shape([4, 3]))
        self.assertEqual(p.value.to_list(), self.list_data[:12])

        # ndarray w/ shape
        p = Parameter(Shape([2, 6]), init=self.ndarray_data[0])
        self.assertEqual(p.shape(), Shape([2, 6]))
        self.assertEqual(p.value.to_list(), self.list_data[:12])

        # list[float] w/o shape
        self.assertRaises(TypeError, lambda: Parameter(init=self.list_data[:12]))
Example 6
 def test_TensorTest_CheckCopyValidToNew(self):
     for dev in TensorTest.devices:
         print(dev)
         tmp = tF.raw_input(Shape([2], 3), [1, 2, 3, 4, 5, 6], dev)
         x = Tensor(tmp)
         self.assertTrue(x.valid())
         self.assertTrue(tmp.valid())
         self.assertEqual(Shape([2], 3), x.shape())
         self.assertEqual(Shape([2], 3), tmp.shape())
         self.assertEqual([1, 2, 3, 4, 5, 6], x.to_list())
         self.assertEqual([1, 2, 3, 4, 5, 6], tmp.to_list())
Example 7
 def test_TensorTest_InplaceMultiplyConst(self):
     for dev in TensorTest.devices:
         x_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
         y_data = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
         x = tF.raw_input(Shape([2, 2], 3), x_data, dev)
         x *= 2
         self.assertEqual(y_data, x.to_list())
     for dev in TensorTest.devices:
         x_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
         y_data = [.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6]
         x = tF.raw_input(Shape([2, 2], 3), x_data, dev)
         x *= .5
         self.assertEqual(y_data, x.to_list())
Example 8
    def test_operators_input_argument(self):
        # list[ndarray] w/o shape
        x = F.input(self.ndarray_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))

        # list[ndarray] w/ shape
        x = F.input(self.ndarray_data, Shape([2, 3], 4))
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([2, 3], 4))

        # ndarray w/o shape
        x = F.input(self.ndarray_data[0])
        self.assertEqual(x.to_list(), self.list_data[:12])
        self.assertEqual(x.shape(), Shape([4, 3], 1))

        # ndarray w/ shape
        x = F.input(self.ndarray_data[0], Shape([2, 3], 2))
        self.assertEqual(x.to_list(), self.list_data[:12])
        self.assertEqual(x.shape(), Shape([2, 3], 2))

        # list[float] w/o shape
        self.assertRaises(TypeError, lambda: F.input(self.list_data))

        # list[float] w/ shape
        x = F.input(self.list_data, shape=Shape([4, 3], 2))
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))
Example 9
    def test_TensorTest_CheckResetValuesByConstant(self):
        for dev in TensorTest.devices:
            x = tF.raw_input(Shape([2, 2], 2), [42] * 8, dev)
            self.assertEqual([42] * 8, x.to_list())

            x = tF.raw_input(Shape([2, 2], 2), [0] * 8, dev)
            x.reset(42)
            self.assertEqual([42] * 8, x.to_list())

            x = tF.raw_input(Shape([2, 2], 2), [123] * 8, dev)
            copied = Tensor(x)

            x.reset(42)
            self.assertEqual([42] * 8, x.to_list())
            self.assertEqual([123] * 8, copied.to_list())
Example 10
 def test_parameter_stats(self):
     self.p.add_stats("stat1", Shape([2, 3]))
     self.p.add_stats("stat2", Shape([2, 4]))
     st1 = self.p.stats["stat1"]
     st1.reset(0)
     self.assertTrue((st1.to_ndarrays()[0] == np.zeros([2, 3])).all())
     self.p.stats["stat1"] = tF.input(np.ones([2, 3]))
     self.assertTrue((st1.to_ndarrays()[0] == np.ones([2, 3])).all())
     self.assertIn("stat1", self.p.stats)
     self.assertIn("stat2", self.p.stats)
     self.assertNotIn("stat3", self.p.stats)
     with self.assertRaises(NotImplementedError):
         del self.p.stats["stat1"]
     with self.assertRaises(AttributeError):
         self.p.stats = ParameterStatistics(self.p)
Example 11
    def test_TensorTest_CheckInvalidInplaceOps(self):
        for dev in TensorTest.devices:
            shapes = [
                Shape(),
                Shape([], 3),
                Shape([2, 2], 2),
            ]
            a = tF.raw_input(Shape([2, 2], 3), [0] * 12, dev)

            for shape in shapes:
                b = tF.raw_input(shape, [0] * shape.size(), dev)
                with self.assertRaises(RuntimeError):
                    a += b
                with self.assertRaises(RuntimeError):
                    a -= b
Example 12
    def test_ModelTest_CheckSaveLoad_Same(self):
        shape = Shape([2, 2])
        values1 = [1, 2, 3, 4]
        values2 = [5, 6, 7, 8]
        tmp = tempfile.NamedTemporaryFile()

        m1 = Model()
        m2 = Model()
        p1 = Parameter(shape, I.Constant(0))
        p1.value += tF.raw_input(shape, values1)
        p2 = Parameter(shape, I.Constant(0))
        p2.value += tF.raw_input(shape, values2)
        m1.add("p", p1)
        m2.add("p", p2)
        m1.add("sm", m2)

        m1.save(tmp.name)

        m1 = Model()
        m2 = Model()
        p1 = Parameter()
        p2 = Parameter()
        m1.add("p", p1)
        m2.add("p", p2)
        m1.add("sm", m2)

        m1.load(tmp.name)

        self.assertTrue(p1.valid())
        self.assertTrue(p2.valid())
        self.assertEqual(shape, p1.shape())
        self.assertEqual(shape, p2.shape())
        self.assertEqual(values1, p1.value.to_list())
        self.assertEqual(values2, p2.value.to_list())
Example 13
 def test_TensorTest_CheckArgMinDims(self):
     data = [
         3, 4, 5, 0, 1, 2, 6, 7, 8,
         0, -1, -2, -6, -7, -8, -3, -4, -5,
     ]
     expected = [
         [0, 0, 0, 2, 2, 2],
         [1, 1, 1, 1, 1, 1],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
     ]
     for dev in TensorTest.devices:
         a = tF.raw_input(Shape([3, 3], 2), data, dev)
         for i, exp in enumerate(expected):
             self.assertEqual(exp, a.argmin(i))
Example 14
    def test_graph_instance(self):
        g = Graph.get_default()
        self.assertIs(g, self.graph)

        node = F.input([0], Shape([]))
        g = node.graph()
        self.assertIs(g, self.graph)
Example 15
    def test_ModelTest_CheckSaveLoad_Insufficient(self):
        shape = Shape([2, 2])
        values1 = [1, 2, 3, 4]
        values2 = [5, 6, 7, 8]
        tmp = tempfile.NamedTemporaryFile()

        m1 = Model()
        m2 = Model()
        p1 = Parameter(shape, I.Constant(0))
        p1.value += tF.raw_input(shape, values1)
        p2 = Parameter(shape, I.Constant(0))
        p2.value += tF.raw_input(shape, values2)
        m1.add("p", p1)
        m2.add("p", p2)
        m1.add("sm", m2)

        m1.save(tmp.name)

        m1 = Model()
        m2 = Model()
        p1 = Parameter()
        m1.add("p", p1)
        m1.add("sm", m2)

        with self.assertRaises(RuntimeError):
            m1.load(tmp.name)
Example 16
    def test_functions_input_argument(self):
        # list[ndarray] w/o shape
        x = F.input(self.ndarray_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))

        # ndarray w/o shape
        x = F.input(self.ndarray_data[0])
        self.assertEqual(x.to_list(), self.list_data[:12])
        self.assertEqual(x.shape(), Shape([4, 3], 1))

        # list[float] w/o shape
        self.assertRaises(TypeError, lambda: F.input(self.list_data))

        # list[float] w/ shape
        x = F.raw_input(Shape([4, 3], 2), self.list_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))
Example 17
    def test_TensorTest_CheckResetValuesByVector(self):
        for dev in TensorTest.devices:
            data = [1, 2, 3, 4, 5, 6, 7, 8]
            x = tF.raw_input(Shape([2, 2], 2), data, dev)
            self.assertEqual(data, x.to_list())

            data = [1, 2, 3, 4, 5, 6, 7, 8]
            x = tF.raw_input(Shape([2, 2], 2), [0] * 8, dev)
            x.reset_by_vector(data)
            self.assertEqual(data, x.to_list())

            data = [1, 2, 3, 4, 5, 6, 7, 8]
            x = tF.raw_input(Shape([2, 2], 2), [123] * 8, dev)
            copied = Tensor(x)

            x.reset_by_vector(data)
            self.assertEqual(data, x.to_list())
            self.assertEqual([123] * 8, copied.to_list())
Example 18
def train_func(trainer):
    dev = D.Naive(12345)
    Device.set_default(dev)
    g = Graph()
    Graph.set_default(g)

    pw1 = Parameter([8, 2], I.XavierUniform())
    pb1 = Parameter([8], I.Constant(0))
    pw2 = Parameter([1, 8], I.XavierUniform())
    pb2 = Parameter([1], I.Constant(0))

    trainer.add_parameter(pw1)
    trainer.add_parameter(pb1)
    trainer.add_parameter(pw2)
    trainer.add_parameter(pb2)

    input_data = [1, 1, 1, -1, -1, 1, -1, -1]
    output_data = [1, -1, -1, 1]

    for i in range(10):
        g.clear()
        x = F.input(input_data, Shape([2], 4))
        w1 = F.parameter(pw1)
        b1 = F.parameter(pb1)
        w2 = F.parameter(pw2)
        b2 = F.parameter(pb2)
        h = F.tanh(w1 @ x + b1)
        y = w2 @ h + b2

        t = F.input(output_data, Shape([], 4))
        diff = t - y
        loss = F.batch.mean(diff * diff)

        trainer.reset_gradients()
        loss.backward()
        trainer.update()

    return [
        pw1.value.to_list(),
        pb1.value.to_list(),
        pw2.value.to_list(),
        pb2.value.to_list()
    ]
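train_func only calls add_parameter(), reset_gradients(), and update() on the object it receives. If the optimizer at hand exposes the O.SGD-style interface used in the MNIST example near the end of this collection (add(), reset_gradients(), update()), a thin adapter suffices; the class below is a sketch for illustration and is not part of the original suite:

    class OptimizerAdapter:
        """Bridges an O.SGD-style optimizer to the trainer-like interface
        expected by train_func above (illustrative only)."""

        def __init__(self, optimizer):
            self._opt = optimizer

        def add_parameter(self, p):
            self._opt.add(p)  # optimizer.add() as used in the MNIST example

        def reset_gradients(self):
            self._opt.reset_gradients()

        def update(self):
            self._opt.update()

    # Example call: weights = train_func(OptimizerAdapter(O.SGD(0.1)))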
Example 19
 def test_TensorTest_CheckNewMatrixMinibatchWithData(self):
     for dev in TensorTest.devices:
         data = [
             3, 1, 4, 1, 5, 9,
             2, 6, 5, 3, 5, 8,
             9, 7, 9, 3, 2, 3,
             8, 4, 6, 2, 6, 4,
         ]
         data_ndarray = [
             np.array([[3, 4, 5], [1, 1, 9]]),
             np.array([[2, 5, 5], [6, 3, 8]]),
             np.array([[9, 9, 2], [7, 3, 3]]),
             np.array([[8, 6, 6], [4, 2, 4]]),
         ]
         x = tF.raw_input(Shape([2, 3], 4), data, dev)
         self.assertTrue(x.valid())
         self.assertIs(dev, x.device())
         self.assertEqual(Shape([2, 3], 4), x.shape())
         self.assertEqual(data, x.to_list())
         with self.assertRaises(RuntimeError):
             x.to_float()
         self.assertTrue(np.array_equal(data_ndarray, x.to_ndarrays()))
Example 20
 def test_TensorTest_CheckNewScalarWithData(self):
     for dev in TensorTest.devices:
         x = tF.raw_input([], [1], dev)
         x_ndarray = [
             np.array([1]),
         ]
         self.assertTrue(x.valid())
         self.assertIs(dev, x.device())
         self.assertEqual(Shape(), x.shape())
         self.assertEqual([1], x.to_list())
         self.assertEqual(1.0, x.to_float())
         self.assertEqual(x_ndarray, x.to_ndarrays())
Example 21
 def forward(self, inputs):
     batch_size = len(inputs[0])
     wlookup = F.parameter(self.pwlookup)
     wxs = F.parameter(self.pwxs)
     wsy = F.parameter(self.pwsy)
     s = F.zeros(Shape([NUM_HIDDEN_UNITS], batch_size))
     outputs = []
     for i in range(len(inputs) - 1):
         w = F.pick(wlookup, inputs[i], 1)
         x = w + s
         s = F.sigmoid(wxs @ x)
         outputs.append(wsy @ s)
     return outputs
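forward() returns one score vector per time step. A loss over those outputs can be assembled from graph functions that already appear elsewhere in this collection (softmax_cross_entropy and batch.mean are used in the MNIST and XOR examples); pairing each output with the following input token is an assumption about how the surrounding model is trained:

    def loss(self, outputs, inputs):
        # Cross-entropy of each step's scores against the next token,
        # summed over time and averaged over the batch (sketch only).
        total = F.softmax_cross_entropy(outputs[0], inputs[1], 0)
        for i in range(1, len(outputs)):
            total = total + F.softmax_cross_entropy(outputs[i], inputs[i + 1], 0)
        return F.batch.mean(total)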
Example 22
 def test_TensorTest_CheckNewMatrixWithData(self):
     for dev in TensorTest.devices:
         data = [1, 2, 3, 4, 5, 6]
         data_ndarray = [
             np.array([[1, 3, 5], [2, 4, 6]]),
         ]
         x = tF.raw_input([2, 3], data, dev)
         self.assertTrue(x.valid())
         self.assertIs(dev, x.device())
         self.assertEqual(Shape([2, 3]), x.shape())
         self.assertEqual(data, x.to_list())
         with self.assertRaises(RuntimeError):
             x.to_float()
         self.assertTrue(np.array_equal(data_ndarray, x.to_ndarrays()))
Example 23
    def test_tensor_instance(self):
        param = Parameter(Shape([]))
        t_origin = param.gradient
        t = param.gradient
        self.assertIs(t, t_origin)

        t = Tensor(t_origin)
        self.assertEqual(t.to_list(), t_origin.to_list())
        self.assertIsNot(t, t_origin)

        t = t_origin
        t *= 2
        self.assertIs(t, t_origin)

        t = t * 2
        self.assertIsNot(t, t_origin)
Example 24
    def decode_step(self, trg_words, train):
        sentence_len = self.concat_fb.shape()[1]

        b = self.whw_ @ self.trg_lstm_.get_h()
        b = F.reshape(b, Shape([1, b.shape()[0]]))
        b = F.broadcast(b, 0, sentence_len)
        x = F.tanh(self.t_concat_fb @ self.wfbw_ + b)
        atten_prob = F.softmax(x @ self.wwe_, 0)
        c = self.concat_fb @ atten_prob

        e = F.pick(self.trg_lookup_, trg_words, 1)
        e = F.dropout(e, self.dropout_rate_, train)

        h = self.trg_lstm_.forward(F.concat([e, c], 0))
        h = F.dropout(h, self.dropout_rate_, train)
        j = F.tanh(self.whj_ @ h + self.bj_)
        return self.wjy_ @ j + self.by_
Example 25
 def test_pytrainer_not_implemented(self):
     dev = D.Naive()
     Device.set_default(dev)
     trainer = IncompleteTrainer()
     p = Parameter(Shape([]))
     with self.assertRaises(NotImplementedError):
         trainer.add_parameter(p)
     with self.assertRaises(NotImplementedError):
         trainer.update()
     with self.assertRaises(NotImplementedError):
         Trainer.get_configs(trainer)
     with self.assertRaises(NotImplementedError):
         Trainer.set_configs(trainer, {'Trainer.epoch': 1}, {
             'Trainer.clip_threshold': 0.0,
             'Trainer.lr_scale': 1.0,
             'Trainer.l2_strength': 0.0
         })
Example 26
    def test_ModelTest_CheckSaveLoadWithStats(self):
        shape = Shape([2, 2])
        values1 = [1, 2, 3, 4]
        values2 = [5, 6, 7, 8]
        stats1 = [10, 20, 30, 40]
        stats2 = [50, 60, 70, 80]
        tmp = tempfile.NamedTemporaryFile()

        m1 = Model()
        m2 = Model()
        p1 = Parameter(shape, I.Constant(0))
        p1.value += tF.raw_input(shape, values1)
        p2 = Parameter(shape, I.Constant(0))
        p2.value += tF.raw_input(shape, values2)
        p1.add_stats("a", shape)
        p2.add_stats("b", shape)
        p1.stats["a"].reset_by_vector(stats1)
        p2.stats["b"].reset_by_vector(stats2)
        m1.add("p", p1)
        m2.add("p", p2)
        m1.add("sm", m2)

        m1.save(tmp.name)

        m1 = Model()
        m2 = Model()
        p1 = Parameter()
        p2 = Parameter()
        m1.add("p", p1)
        m2.add("p", p2)
        m1.add("sm", m2)

        m1.load(tmp.name)

        self.assertTrue(p1.valid())
        self.assertTrue(p2.valid())
        self.assertEqual(shape, p1.shape())
        self.assertEqual(shape, p2.shape())
        self.assertEqual(values1, p1.value.to_list())
        self.assertEqual(values2, p2.value.to_list())
        self.assertTrue("a" in p1.stats)
        self.assertTrue("b" in p2.stats)
        self.assertEqual(stats1, p1.stats["a"].to_list())
        self.assertEqual(stats2, p2.stats["b"].to_list())
Example 27
 def test_pytrainer_propagate_exception(self):
     dev = D.Naive()
     Device.set_default(dev)
     trainer = ExceptionTrainer()
     p = Parameter(Shape([]))
     with self.assertRaises(TestException) as ctx:
         trainer.add_parameter(p)
     self.assertEqual(str(ctx.exception), "configure_parameter")
     with self.assertRaises(TestException) as ctx:
         trainer.update()
     self.assertEqual(str(ctx.exception), "update_parameter")
     with self.assertRaises(TestException) as ctx:
         Trainer.get_configs(trainer)
     self.assertEqual(str(ctx.exception), "get_configs")
     with self.assertRaises(TestException) as ctx:
         Trainer.set_configs(trainer, {'Trainer.epoch': 1}, {
             'Trainer.clip_threshold': 0.0,
             'Trainer.lr_scale': 1.0,
             'Trainer.l2_strength': 0.0
         })
     self.assertEqual(str(ctx.exception), "set_configs")
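Both trainer tests above exercise custom Trainer subclasses (IncompleteTrainer, ExceptionTrainer) defined elsewhere in the suite. The exception messages reveal the overridable hooks involved; a do-nothing concrete subclass might look roughly like the sketch below, where the hook signatures are kept loose (*args) because the tests do not show them and everything here is inferred rather than taken from documentation:

    class NoOpTrainer(Trainer):
        """Illustrative only: provides the hooks named in the exception
        messages above, with guessed, permissive signatures."""

        def configure_parameter(self, *args):
            pass  # reached via add_parameter()

        def update_parameter(self, *args):
            pass  # reached via update()

        def get_configs(self):
            # Mirrors the (int configs, float configs) split visible in the
            # Trainer.set_configs() calls above.
            return {'Trainer.epoch': 0}, {'Trainer.lr_scale': 1.0}

        def set_configs(self, int_configs, float_configs):
            pass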
Example 28
def main():
    # Loads data
    train_inputs = load_images("data/train-images-idx3-ubyte",
                               NUM_TRAIN_SAMPLES)
    train_labels = load_labels("data/train-labels-idx1-ubyte",
                               NUM_TRAIN_SAMPLES)
    test_inputs = load_images("data/t10k-images-idx3-ubyte", NUM_TEST_SAMPLES)
    test_labels = load_labels("data/t10k-labels-idx1-ubyte", NUM_TEST_SAMPLES)

    dev = D.CUDA(0)
    Device.set_default(dev)
    g = Graph()
    Graph.set_default(g)

    # Parameters of CNNs
    # Shape: {kernel_height, kernel_width, in_channels, out_channels}
    pw_cnn1 = Parameter(Shape([KERNEL_SIZE1, KERNEL_SIZE1, 1, NUM_CHANNELS1]),
                        I.XavierUniformConv2D())
    pw_cnn2 = Parameter(
        Shape([KERNEL_SIZE2, KERNEL_SIZE2, NUM_CHANNELS1, NUM_CHANNELS2]),
        I.XavierUniformConv2D())

    # Parameters of FC layers
    pw_fc1 = Parameter(Shape([NUM_HIDDEN_UNITS, NUM_INPUT_UNITS]),
                       I.XavierUniform())
    pw_fc2 = Parameter(Shape([NUM_OUTPUT_UNITS, NUM_HIDDEN_UNITS]),
                       I.XavierUniform())
    pb_fc1 = Parameter(Shape([NUM_HIDDEN_UNITS]), I.Constant(0))
    pb_fc2 = Parameter(Shape([NUM_OUTPUT_UNITS]), I.Constant(0))

    # Optimizer
    optimizer = O.SGD(.1)
    optimizer.add(pw_cnn1, pw_cnn2, pw_fc1, pw_fc2, pb_fc1, pb_fc2)

    # Helper lambda to construct the predictor network.
    def make_graph(inputs, train):
        # Input and parameters.
        #x = F.input(Shape([IMAGE_HEIGHT, IMAGE_WIDTH], BATCH_SIZE), inputs)
        x = F.input(inputs)
        w_cnn1 = F.parameter(pw_cnn1)
        w_cnn2 = F.parameter(pw_cnn2)
        w_fc1 = F.parameter(pw_fc1)
        w_fc2 = F.parameter(pw_fc2)
        b_fc1 = F.parameter(pb_fc1)
        b_fc2 = F.parameter(pb_fc2)
        # CNNs
        h_cnn1 = F.relu(F.conv2d(x, w_cnn1, PADDING1, PADDING1, 1, 1, 1, 1))
        h_pool1 = F.max_pool2d(h_cnn1, 2, 2, 0, 0, 2, 2)
        h_cnn2 = F.relu(
            F.conv2d(h_pool1, w_cnn2, PADDING2, PADDING2, 1, 1, 1, 1))
        h_pool2 = F.max_pool2d(h_cnn2, 2, 2, 0, 0, 2, 2)
        # FC layers
        x_fc = F.dropout(F.flatten(h_pool2), .5, train)
        h_fc = F.dropout(F.relu(F.matmul(w_fc1, x_fc) + b_fc1), .5, train)
        return F.matmul(w_fc2, h_fc) + b_fc2

    # Batch randomizer
    ids = list(range(NUM_TRAIN_SAMPLES))

    for epoch in range(MAX_EPOCH):
        # Shuffles sample IDs.
        random.shuffle(ids)

        # Training loop
        for batch in range(NUM_TRAIN_BATCHES):
            print("\rTraining... %d / %d" % (batch + 1, NUM_TRAIN_BATCHES),
                  end="")
            # Makes a minibatch for training.
            inputs = [
                train_inputs[ids[batch * BATCH_SIZE + i]]
                for i in range(BATCH_SIZE)
            ]
            labels = [
                train_labels[ids[batch * BATCH_SIZE + i]]
                for i in range(BATCH_SIZE)
            ]

            # Constructs the graph.
            g.clear()
            y = make_graph(inputs, True)
            loss = F.softmax_cross_entropy(y, labels, 0)
            avg_loss = F.batch.mean(loss)

            # Dumps the computation graph on the first iteration.
            # if epoch == 0 and batch == 0:
            #     print(g.dump("dot"))

            # Implicit forward, then backward and parameter update.
            optimizer.reset_gradients()
            avg_loss.backward()
            optimizer.update()

        print()

        match = 0

        # Test loop
        for batch in range(NUM_TEST_BATCHES):
            print("\rTesting... %d / %d" % (batch + 1, NUM_TEST_BATCHES),
                  end="")
            # Makes a test minibatch.
            inputs = [
                test_inputs[batch * BATCH_SIZE + i] for i in range(BATCH_SIZE)
            ]

            # Constructs the graph.
            g.clear()
            y = make_graph(inputs, False)

            # Gets the outputs, takes the per-sample argmax, and compares it with the label.
            y_val = y.to_list()
            for i in range(BATCH_SIZE):
                maxval = -1e10
                argmax = -1
                for j in range(NUM_OUTPUT_UNITS):
                    v = y_val[j + i * NUM_OUTPUT_UNITS]
                    if v > maxval:
                        maxval = v
                        argmax = j

                if argmax == test_labels[i + batch * BATCH_SIZE]:
                    match += 1

        accuracy = 100.0 * match / NUM_TEST_SAMPLES
        print("epoch %d: accuracy: %.2f%%" % (epoch, accuracy))

    return 0
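main() is defined but never invoked in the snippet above; running it as a script only needs the usual entry-point guard:

    if __name__ == "__main__":
        main()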