def test_device_instance(self):
    """Tensors, nodes and parameters must report the device they were created on."""
    # A tensor built through the eager API lives on the default device.
    tensor = tF.raw_input([], [0])
    self.assertIs(tensor.device(), self.device)

    # A graph node created without an explicit device also uses the default.
    default_node = F.raw_input([], [0])
    self.assertIs(default_node.device(), self.device)

    # A freshly constructed device is a distinct object from the default one.
    other_device = Naive()
    self.assertIsNot(other_device, self.device)

    # Passing dev= overrides the default device for that node only.
    explicit_node = F.raw_input([], [0], dev=other_device)
    self.assertIs(explicit_node.device(), other_device)

    # Parameters are allocated on the default device as well.
    param = Parameter([], I.Constant(1))
    self.assertIs(param.device(), self.device)
def train_func(optimizer):
    """Train a tiny 2-8-1 tanh network on the XOR problem for 10 steps.

    Uses a Naive device with a fixed seed so the run is deterministic for a
    given optimizer. Returns the four learned parameter tensors as lists:
    [w1, b1, w2, b2].
    """
    device = D.Naive(12345)  # fixed seed -> reproducible initialization
    Device.set_default(device)
    graph = Graph()
    Graph.set_default(graph)

    # Two-layer network parameters: 2 -> 8 (tanh) -> 1.
    w1_param = Parameter([8, 2], I.XavierUniform())
    b1_param = Parameter([8], I.Constant(0))
    w2_param = Parameter([1, 8], I.XavierUniform())
    b2_param = Parameter([1], I.Constant(0))
    optimizer.add(w1_param, b1_param, w2_param, b2_param)

    # XOR truth table encoded as a minibatch of 4 two-element examples.
    input_data = [1, 1, 1, -1, -1, 1, -1, -1]
    output_data = [1, -1, -1, 1]

    for _ in range(10):
        graph.clear()
        x = F.raw_input(Shape([2], 4), input_data)
        w1 = F.parameter(w1_param)
        b1 = F.parameter(b1_param)
        w2 = F.parameter(w2_param)
        b2 = F.parameter(b2_param)
        hidden = F.tanh(w1 @ x + b1)
        y = w2 @ hidden + b2
        t = F.raw_input(Shape([], 4), output_data)
        residual = t - y
        # Mean squared error over the minibatch.
        loss = F.batch.mean(residual * residual)
        optimizer.reset_gradients()
        loss.backward()
        optimizer.update()

    return [
        w1_param.value.to_list(),
        b1_param.value.to_list(),
        w2_param.value.to_list(),
        b2_param.value.to_list(),
    ]
def test_functions_input_argument(self):
    """Exercise the argument types accepted by F.input and F.raw_input."""
    # list[ndarray] with no explicit shape: shape and batch are inferred.
    node = F.input(self.ndarray_data)
    self.assertEqual(node.to_list(), self.list_data)
    self.assertEqual(node.shape(), Shape([4, 3], 2))

    # A single ndarray with no explicit shape: batch size of 1.
    node = F.input(self.ndarray_data[0])
    self.assertEqual(node.to_list(), self.list_data[:12])
    self.assertEqual(node.shape(), Shape([4, 3], 1))

    # A flat list[float] is rejected by F.input (shape cannot be inferred)...
    self.assertRaises(TypeError, lambda: F.input(self.list_data))

    # ...but F.raw_input accepts it when an explicit shape is provided.
    node = F.raw_input(Shape([4, 3], 2), self.list_data)
    self.assertEqual(node.to_list(), self.list_data)
    self.assertEqual(node.shape(), Shape([4, 3], 2))
def test_graph_instance(self):
    """A node created without an explicit graph must belong to the default graph."""
    created = F.raw_input([], [0])
    self.assertIs(created.graph(), self.graph)