Example #1
 def test_node_matmul(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(
         ((x @ y).to_ndarrays()[0] == np.array([[9, 17], [19, 35]])).all())
     self.assertRaises(TypeError, lambda: x @ 2)
     self.assertRaises(TypeError, lambda: 2 @ x)
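The fixtures `self.a` and `self.b` are never shown in these operator tests, but the expected outputs of Examples #1 and #4 through #7 below are all consistent with values like the following plain-NumPy sketch (the names and dtype are assumptions):

    import numpy as np

    # Hypothetical fixture values implied by the expected results:
    # a @ b == [[9, 17], [19, 35]], a * b == [[1, 2], [12, 32]],
    # a - b == [[0, 1], [-1, -4]], a ** b == [[1, 2], [81, 65536]].
    a = np.array([[1, 2], [3, 4]], dtype=np.float32)
    b = np.array([[1, 1], [4, 8]], dtype=np.float32)

    print(a @ b)  # [[ 9. 17.]
                  #  [19. 35.]]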
Example #2
 def test_node_pow(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(
         np.isclose((x**y).to_ndarrays()[0], np.array([[1, 2],
                                                       [81, 65536]])).all())
     self.assertTrue(
         np.isclose((x**2).to_ndarrays()[0], np.array([[1, 4],
                                                       [9, 16]])).all())
     self.assertTrue(
         np.isclose((2**x).to_ndarrays()[0], np.array([[2, 4],
                                                       [8, 16]])).all())
     self.assertTrue(
         np.isclose((x**-2).to_ndarrays()[0],
                    np.array([[1, 1 / 4], [1 / 9, 1 / 16]])).all())
     input_arr = np.array([1, -1, 3, -3, 5, -5])
     x = F.input(input_arr)
     self.assertTrue(((x**6).to_ndarrays()[0] == np.array(
         [1, 1, 729, 729, 15625, 15625])).all())
     self.assertTrue(((x**9).to_ndarrays()[0] == np.array(
         [1, -1, 19683, -19683, 1953125, -1953125])).all())
     input_arr = np.array([1, -1])
     x = F.input(input_arr)
     self.assertTrue(
         ((x**0x7fffffff).to_ndarrays()[0] == np.array([1, -1])).all())
     self.assertTrue(
         ((x**-0x80000000).to_ndarrays()[0] == np.array([1, 1])).all())
     self.assertTrue(np.isnan((x**0x80000000).to_ndarrays()[0]).any())
     self.assertTrue(np.isnan((x**-0x80000001).to_ndarrays()[0]).any())
     self.assertRaises(TypeError, lambda: pow(x, y, 2))
Example #3
    def test_device_instance(self):
        dev = Device.get_default()
        self.assertIs(dev, self.device)

        tensor = tF.input([0], Shape([]))
        dev = tensor.device()
        self.assertIs(dev, self.device)

        node = F.input([0], Shape([]))
        dev = node.device()
        self.assertIs(dev, self.device)

        my_device = Naive()
        self.assertIsNot(my_device, self.device)

        node = F.input([0], Shape([]), device=my_device)
        dev = node.device()
        self.assertIs(dev, my_device)

        dev = self.graph.get_device(node)
        self.assertIs(dev, my_device)

        param = Parameter(Shape([]))
        dev = param.device()
        self.assertIs(dev, self.device)
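Example #3 relies on fixtures `self.device` and `self.graph` created elsewhere in the test class (and on `tF` being the tensor-level counterpart of `F`). A minimal `setUp` sketch consistent with the assertions above, assuming the usual primitiv imports:

    from primitiv import Device, Graph
    from primitiv.devices import Naive

    def setUp(self):
        # Hypothetical fixture: one Naive device and one graph,
        # both registered as the process-wide defaults.
        self.device = Naive()
        Device.set_default(self.device)
        self.graph = Graph()
        Graph.set_default(self.graph)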
Example #4
 def test_node_add(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(
         ((x + y).to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
     self.assertTrue(((x + 2).to_ndarrays()[0] == np.array([[3, 4],
                                                            [5, 6]])).all())
     self.assertTrue(((2 + x).to_ndarrays()[0] == np.array([[3, 4],
                                                            [5, 6]])).all())
Example #5
 def test_node_mul(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(
         ((x * y).to_ndarrays()[0] == np.array([[1, 2], [12, 32]])).all())
     self.assertTrue(((x * 2).to_ndarrays()[0] == np.array([[2, 4],
                                                            [6, 8]])).all())
     self.assertTrue(((2 * x).to_ndarrays()[0] == np.array([[2, 4],
                                                            [6, 8]])).all())
Example #6
 def test_node_sub(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(
         ((x - y).to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
     self.assertTrue(((x - 2).to_ndarrays()[0] == np.array([[-1, 0],
                                                            [1, 2]])).all())
     self.assertTrue(
         ((2 - x).to_ndarrays()[0] == np.array([[1, 0], [-1, -2]])).all())
Example #7
 def test_node_truediv(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((x / y).to_ndarrays()[0] == np.array([[1, 2],
                                                            [0.75,
                                                             0.5]])).all())
     self.assertTrue(((x / 2).to_ndarrays()[0] == np.array([[0.5, 1],
                                                            [1.5,
                                                             2]])).all())
     self.assertTrue(((2 / y).to_ndarrays()[0] == np.array([[2, 2],
                                                            [0.5,
                                                             0.25]])).all())
Example #8
    def test_graph_instance(self):
        g = Graph.get_default()
        self.assertIs(g, self.graph)

        node = F.input([0], Shape([]))
        g = node.graph()
        self.assertIs(g, self.graph)
Example #9
 def make_graph(inputs, train):
     # Stores input values.
     x = F.input(data=inputs)
     # Calculates the hidden layer.
     w1 = F.input(param=pw1)
     b1 = F.input(param=pb1)
     h = F.relu(F.matmul(w1, x) + b1)
     # Batch normalization (disabled); a Python equivalent of the original
     # C++ snippet would be something like:
     # beta = F.input(param=pbeta)
     # gamma = F.input(param=pgamma)
     # h = F.batch.normalize(h) * gamma + beta
     # Dropout
     h = F.dropout(h, .5, train)
     # Calculates the output layer.
     w2 = F.input(param=pw2)
     b2 = F.input(param=pb2)
     return F.matmul(w2, h) + b2
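`make_graph` reads each parameter with `F.input(param=...)` and takes a `train` flag that only controls dropout. A hypothetical usage sketch, borrowing the graph, trainer, and data names from Examples #11 and #13:

    # One training step: dropout is active.
    g.clear()
    y = make_graph(input_data, train=True)
    t = F.input(data=output_data)
    loss = F.batch.mean((t - y) * (t - y))
    trainer.reset_gradients()
    g.backward(loss)
    trainer.update()

    # Evaluation pass: same builder, dropout disabled.
    y_eval = make_graph(input_data, train=False)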
Example #10
    def test_operators_input_argument(self):
        # list[ndarray] w/o shape
        x = F.input(self.ndarray_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))

        # ndarray w/o shape
        x = F.input(self.ndarray_data[0])
        self.assertEqual(x.to_list(), self.list_data[:12])
        self.assertEqual(x.shape(), Shape([4, 3], 1))

        # list[float] w/o shape
        self.assertRaises(TypeError, lambda: F.input(self.list_data))

        # list[float] w/ shape
        x = F.raw_input(Shape([4, 3], 2), self.list_data)
        self.assertEqual(x.to_list(), self.list_data)
        self.assertEqual(x.shape(), Shape([4, 3], 2))
Example #11
def train_func(trainer):
    dev = D.Naive(12345)
    Device.set_default(dev)
    g = Graph()
    Graph.set_default(g)

    pw1 = Parameter([8, 2], I.XavierUniform())
    pb1 = Parameter([8], I.Constant(0))
    pw2 = Parameter([1, 8], I.XavierUniform())
    pb2 = Parameter([1], I.Constant(0))

    trainer.add_parameter(pw1)
    trainer.add_parameter(pb1)
    trainer.add_parameter(pw2)
    trainer.add_parameter(pb2)

    input_data = [1, 1, 1, -1, -1, 1, -1, -1]
    output_data = [1, -1, -1, 1]

    for i in range(10):
        g.clear()
        x = F.input(input_data, Shape([2], 4))
        w1 = F.parameter(pw1)
        b1 = F.parameter(pb1)
        w2 = F.parameter(pw2)
        b2 = F.parameter(pb2)
        h = F.tanh(w1 @ x + b1)
        y = w2 @ h + b2

        t = F.input(output_data, Shape([], 4))
        diff = t - y
        loss = F.batch.mean(diff * diff)

        trainer.reset_gradients()
        loss.backward()
        trainer.update()

    return [
        pw1.value.to_list(),
        pb1.value.to_list(),
        pw2.value.to_list(),
        pb2.value.to_list()
    ]
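`train_func` only needs a trainer object that provides `add_parameter`, `reset_gradients`, and `update`. A hypothetical invocation, constructing the SGD trainer the same way as Example #13 (where `T` is primitiv's trainers module):

    # Run the 10 training steps on the XOR data and print the learned parameters.
    weights = train_func(T.SGD(0.1))
    for w in weights:
        print(w)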
Example #12
    def make_graph(inputs, train):
        x = F.input(inputs)

        w1 = F.parameter(pw1)
        b1 = F.parameter(pb1)
        h = F.relu(w1 @ x + b1)

        h = F.dropout(h, .5, train)

        w2 = F.parameter(pw2)
        b2 = F.parameter(pb2)
        return w2 @ h + b2
Example #13
def main():

    with DefaultScopeDevice(CPUDevice()):
        pw1 = Parameter("w1", [8, 2], I.XavierUniform())
        pb1 = Parameter("b1", [8], I.Constant(0))
        pw2 = Parameter("w2", [1, 8], I.XavierUniform())
        pb2 = Parameter("b2", [], I.Constant(0))

        trainer = T.SGD(0.1)

        trainer.add_parameter(pw1)
        trainer.add_parameter(pb1)
        trainer.add_parameter(pw2)
        trainer.add_parameter(pb2)

        input_data = np.array(
            [
                [1, 1],  # Sample 1
                [1, -1],  # Sample 2
                [-1, 1],  # Sample 3
                [-1, -1],  # Sample 4
            ],
            dtype=np.float32)

        output_data = np.array(
            [
                1,  # Label 1
                -1,  # Label 2
                -1,  # Label 3
                1,  # Label 4
            ],
            dtype=np.float32)

        for i in range(100):
            g = Graph()
            with DefaultScopeGraph(g):
                # Builds a computation graph.
                #x = F.input(shape=Shape([2], 4), data=input_data)
                x = F.input(data=input_data)
                w1 = F.input(param=pw1)
                b1 = F.input(param=pb1)
                w2 = F.input(param=pw2)
                b2 = F.input(param=pb2)
                h = F.tanh(F.matmul(w1, x) + b1)
                y = F.matmul(w2, h) + b2

                # Calculates values.
                y_val = g.forward(y).to_list()
                print("epoch ", i, ":")
                for j in range(4):
                    print("  [", j, "]: ", y_val[j])
                # t = F.input(shape=Shape([], 4), data=output_data)
                t = F.input(data=output_data)
                diff = t - y
                loss = F.batch.mean(diff * diff)
                loss_val = g.forward(loss).to_list()[0]
                print("  loss: ", loss_val)
                trainer.reset_gradients()
                g.backward(loss)
                trainer.update()
Example #14
 def make_graph(inputs):
     # We first store input values explicitly on GPU 0.
     x = F.input(inputs, device=dev0)
     w1 = F.parameter(pw1)
     b1 = F.parameter(pb1)
     w2 = F.parameter(pw2)
     b2 = F.parameter(pb2)
     # The hidden layer is calculated and implicitly stored on GPU 0.
     h_on_gpu0 = F.relu(w1 @ x + b1)
     # `copy()` transfers the hidden layer to GPU 1.
     h_on_gpu1 = F.copy(h_on_gpu0, dev1)
     # The output layer is calculated and implicitly stored on GPU 1.
     return w2 @ h_on_gpu1 + b2
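The multi-GPU builder above assumes two device handles, `dev0` and `dev1`, with the parameters placed so that the hidden layer lives on GPU 0 and the output layer on GPU 1. A sketch of that setup; the import path and device IDs are assumptions:

    from primitiv import Device
    from primitiv import devices as D

    # Hypothetical handles assumed by make_graph above.
    dev0 = D.CUDA(0)  # holds the input and the hidden layer
    dev1 = D.CUDA(1)  # holds the output layer
    Device.set_default(dev0)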
Example #15
def main():
    dev = D.Naive()  # or D.CUDA(gpuid)
    Device.set_default(dev)

    # Parameters
    pw1 = Parameter([8, 2], I.XavierUniform())
    pb1 = Parameter([8], I.Constant(0))
    pw2 = Parameter([1, 8], I.XavierUniform())
    pb2 = Parameter([], I.Constant(0))

    # Optimizer
    optimizer = O.SGD(0.1)

    # Registers parameters.
    optimizer.add_parameter(pw1)
    optimizer.add_parameter(pb1)
    optimizer.add_parameter(pw2)
    optimizer.add_parameter(pb2)

    # Training data
    input_data = [
        np.array([1, 1], dtype=np.float32),  # Sample 1
        np.array([1, -1], dtype=np.float32),  # Sample 2
        np.array([-1, 1], dtype=np.float32),  # Sample 3
        np.array([-1, -1], dtype=np.float32),  # Sample 4
    ]
    output_data = [
        np.array([1], dtype=np.float32),  # Label 1
        np.array([-1], dtype=np.float32),  # Label 2
        np.array([-1], dtype=np.float32),  # Label 3
        np.array([1], dtype=np.float32),  # Label 4
    ]

    g = Graph()
    Graph.set_default(g)

    for i in range(10):
        g.clear()

        # Builds a computation graph.
        x = F.input(input_data)
        w1 = F.parameter(pw1)
        b1 = F.parameter(pb1)
        w2 = F.parameter(pw2)
        b2 = F.parameter(pb2)
        h = F.tanh(w1 @ x + b1)
        y = w2 @ h + b2

        # Obtains values.
        y_val = y.to_list()
        print("epoch ", i, ":")
        for j in range(4):
            print("  [", j, "]: ", y_val[j])

        # Extends the computation graph to calculate loss values.
        t = F.input(output_data)
        diff = t - y
        loss = F.batch.mean(diff * diff)

        # Obtains the loss.
        loss_val = loss.to_float()
        print("  loss: ", loss_val)

        # Updates parameters.
        optimizer.reset_gradients()
        loss.backward()
        optimizer.update()
Example #16
 def test_input_ndarrays(self):
     x = F.input(self.input_data)
     self.assertEqual(x.to_list(), self.list_expected)
     self.assertTrue((x.to_ndarrays()[0] == self.input_data[0]).all())
     self.assertTrue((x.to_ndarrays()[1] == self.input_data[1]).all())
Example #17
 def test_node_pos(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((+x).to_ndarrays()[0] == self.a).all())
Example #18
 def test_node_neg(self):
     x = F.input(self.a)
     y = F.input(self.b)
     self.assertTrue(((-x).to_ndarrays()[0] == -self.a).all())