Example #1
    def test_auto_prune9(self):
        with fluid.dygraph.guard():
            value0 = np.arange(26).reshape(2, 13).astype("float32")
            value1 = np.arange(6).reshape(2, 3).astype("float32")
            value2 = np.arange(10).reshape(2, 5).astype("float32")
            linear = fluid.Linear(13, 5, dtype="float32")
            linear2 = fluid.Linear(5, 3, dtype="float32")
            a = fluid.dygraph.to_variable(value0)
            b = fluid.dygraph.to_variable(value1)
            c = fluid.dygraph.to_variable(value2)
            out1 = linear(a)
            linear_origin = linear.weight.numpy()
            out2 = linear2(out1)
            linear2_origin = linear2.weight.numpy()
            # marking the loss as stop_gradient prunes the whole backward
            # graph, so minimize() must leave both layers' weights unchanged
            out2.stop_gradient = True
            out2.backward()
            optimizer = fluid.optimizer.SGD(
                learning_rate=0.003,
                parameter_list=(linear.parameters() + linear2.parameters()))
            optimizer.minimize(out2)
            self.assertTrue(
                np.array_equal(linear2_origin, linear2.weight.numpy()))
            self.assertTrue(
                np.array_equal(linear_origin, linear.weight.numpy()))
            try:
                linear2.weight.gradient()
            except ValueError as e:
                assert type(e) == ValueError
Example #2
    def func_test_named_sublayers(self):
        with fluid.dygraph.guard():
            fc1 = fluid.Linear(10, 3)
            fc2 = fluid.Linear(3, 10, bias_attr=False)
            custom = MyLayer(3, 10)
            model = fluid.dygraph.Sequential(fc1, fc2, custom)
            named_sublayers = model.named_sublayers()
            list_named_sublayers = list(named_sublayers)

            expected_sublayers = [fc1, fc2, custom, custom.fc, custom.conv]
            self.assertEqual(len(list_named_sublayers),
                             len(expected_sublayers))
            for (name,
                 sublayer), expected_sublayer in zip(list_named_sublayers,
                                                     expected_sublayers):
                self.assertEqual(sublayer, expected_sublayer)

            list_sublayers = list(model.sublayers())
            self.assertEqual(len(list_named_sublayers), len(list_sublayers))
            for (name,
                 sublayer), expected_sublayer in zip(list_named_sublayers,
                                                     list_sublayers):
                self.assertEqual(sublayer, expected_sublayer)

            self.assertListEqual(
                [l for _, l in list(model.named_sublayers(include_self=True))],
                [model] + expected_sublayers)
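
Examples #2 and #4 assume a MyLayer class that is not shown in this listing. A minimal sketch consistent with the sublayer names the tests check (custom.fc and custom.conv) could look like the following; the constructor arguments and filter size are assumptions, not the definition from the Paddle test suite:

class MyLayer(fluid.Layer):
    def __init__(self, num_channel, dim):
        super(MyLayer, self).__init__()
        # the names 'fc' and 'conv' match expected_sublayers above
        self.fc = fluid.Linear(dim, dim)
        self.conv = fluid.dygraph.Conv2D(num_channel, num_channel, 3)

    def forward(self, x):
        # the tests above only inspect sublayer/parameter names,
        # so forward is never exercised by them
        return self.fc(x)
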
Example #3
    def boundary_net(self, main_prog, startup_prog):
        with fluid.program_guard(main_prog, startup_prog):
            fleet.init(is_collective=True)
            x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32')
            with paddle.static.device_guard('gpu:0'):
                linear = fluid.Linear(4, 8, bias_attr=False)
                out = linear(x)
            with paddle.static.device_guard('gpu:1'):
                linear = fluid.Linear(8, 5, bias_attr=False)
                out = linear(out)
                avg_cost = paddle.mean(out)
            strategy = fleet.DistributedStrategy()
        return avg_cost, strategy
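
Example #3 splits the graph across two devices with device_guard and assumes the usual collective-training imports, roughly:

import paddle
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
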
Example #4
    def func_test_named_parameters(self):
        with fluid.dygraph.guard():
            fc1 = fluid.Linear(10, 3)
            fc2 = fluid.Linear(3, 10, bias_attr=False)
            custom = MyLayer(3, 10)
            model = paddle.nn.Sequential(fc1, fc2, custom)

            named_parameters = list(model.named_parameters())
            expected_named_parameters = list()
            for prefix, layer in model.named_sublayers():
                for name, param in layer.named_parameters(
                        include_sublayers=False):
                    full_name = prefix + ('.' if prefix else '') + name
                    expected_named_parameters.append((full_name, param))

            self.assertListEqual(expected_named_parameters, named_parameters)
Example #5
    def test_paddle_imperative_no_grad_guard(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = fluid.Linear(2, 2)
            self.assertTrue(l0.weight._grad_ivar() is None)
            l1 = fluid.Linear(2, 2)
            with paddle.no_grad():
                self.assertTrue(l1.weight.stop_gradient is False)
                tmp = l1.weight * 2
                self.assertTrue(tmp.stop_gradient)
            x = paddle.to_tensor(data)
            y = paddle.add(l0(x), tmp)
            o = l1(y)
            o.backward()

            self.assertTrue(tmp._grad_ivar() is None)
            self.assertTrue(l0.weight._grad_ivar() is not None)
Example #6
    def test_no_grad_guard(self):
        data = np.array([[2, 3], [4, 5]]).astype('float32')
        with fluid.dygraph.guard():
            l0 = fluid.Linear(2, 2)
            self.assertTrue(l0.weight._grad_ivar() is None)
            l1 = fluid.Linear(2, 2)
            with fluid.dygraph.no_grad():
                self.assertTrue(l1.weight.stop_gradient is False)
                tmp = l1.weight * 2
                self.assertTrue(tmp.stop_gradient)
            x = fluid.dygraph.to_variable(data)
            y = l0(x) + tmp
            o = l1(y)
            o.backward()

            self.assertTrue(tmp._grad_ivar() is None)
            self.assertTrue(l0.weight._grad_ivar() is not None)
Example #7
    def test_auto_prune6(self):
        with fluid.dygraph.guard():
            value0 = np.arange(26).reshape(2, 13).astype("float32")
            value1 = np.arange(6).reshape(2, 3).astype("float32")
            value2 = np.arange(10).reshape(2, 5).astype("float32")
            linear = fluid.Linear(13, 5, dtype="float32")
            linear2 = fluid.Linear(3, 3, dtype="float32")
            a = fluid.dygraph.to_variable(value0)
            b = fluid.dygraph.to_variable(value1)
            c = fluid.dygraph.to_variable(value2)
            out1 = linear(a)
            out2 = linear2(b)
            out1.stop_gradient = True
            out = fluid.layers.concat(input=[out1, out2, c], axis=1)
            out.backward()
            # the branch through out1 is pruned from the backward graph,
            # so its gradient (and linear's) stays at zero
            self.assertTrue((linear.weight.gradient() == 0).all())
            self.assertTrue((out1.gradient() == 0).all())
Example #8
    def func_sequential_list_params(self):
        data = np.random.uniform(-1, 1, [5, 10]).astype('float32')
        with fluid.dygraph.guard():
            data = fluid.dygraph.to_variable(data)
            model1 = fluid.dygraph.Sequential(fluid.Linear(10, 1),
                                              fluid.Linear(1, 2))
            res1 = model1(data)
            self.assertListEqual(res1.shape, [5, 2])
            model1[1] = fluid.Linear(1, 3)
            res1 = model1(data)
            self.assertListEqual(res1.shape, [5, 3])
            loss1 = fluid.layers.reduce_mean(res1)
            loss1.backward()

            l1 = fluid.Linear(10, 1)
            l2 = fluid.Linear(1, 3)
            model2 = fluid.dygraph.Sequential(['l1', l1], ['l2', l2])
            self.assertEqual(len(model2), 2)
            res2 = model2(data)
            self.assertTrue(l1 is model2.l1)
            self.assertListEqual(res2.shape, res1.shape)
            self.assertEqual(len(model1.parameters()),
                             len(model2.parameters()))
            del model2['l2']
            self.assertEqual(len(model2), 1)
            res2 = model2(data)
            self.assertListEqual(res2.shape, [5, 1])
            model2.add_sublayer('l3', fluid.Linear(1, 3))
            model2.add_sublayer('l4', fluid.Linear(3, 4))
            self.assertEqual(len(model2), 3)
            res2 = model2(data)
            self.assertListEqual(res2.shape, [5, 4])

            loss2 = fluid.layers.reduce_mean(res2)
            loss2.backward()
Example #9
    def func_auto_prune10(self):
        with fluid.dygraph.guard():
            value0 = np.arange(26).reshape(2, 13).astype("float32")
            value1 = np.arange(6).reshape(2, 3).astype("float32")
            value2 = np.arange(10).reshape(2, 5).astype("float32")
            linear = fluid.Linear(13, 5, dtype="float32")
            linear2 = fluid.Linear(3, 3, dtype="float32")
            a = fluid.dygraph.to_variable(value0)
            b = fluid.dygraph.to_variable(value1)
            c = fluid.dygraph.to_variable(value2)
            out1 = linear(a)
            out2 = linear2(b)
            out1.stop_gradient = True
            out = fluid.layers.concat(input=[out1, out2, c], axis=1)
            # TODO(jiabin): eager mode does not need sort_sum_gradient;
            # remove this test once fluid is no longer supported.
            fluid.set_flags({'FLAGS_sort_sum_gradient': True})
            out.backward()
            self.assertTrue(linear.weight.gradient() is None)
            self.assertTrue(out1.gradient() is None)
Example #10
    def test_adam_op_dygraph(self):
        paddle.disable_static()
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = fluid.dygraph.to_variable(value)
        linear = fluid.Linear(13, 5, dtype="float32")

        adam = paddle.optimizer.Adam(learning_rate=0.01,
                                     parameters=linear.parameters())
        out = linear(a)
        out.backward()
        adam.step()
        adam.clear_gradients()
Example #11
    def test_adam_with_grad_clip(self):
        paddle.disable_static()
        value = np.arange(26).reshape(2, 13).astype("float32")
        a = fluid.dygraph.to_variable(value)
        linear = fluid.Linear(13, 5, dtype="float32")
        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
        adam = paddle.optimizer.Adam(0.1,
                                     parameters=linear.parameters(),
                                     grad_clip=clip)
        out = linear(a)
        out.backward()
        adam.step()
        adam.clear_gradients()
Example #12
    def forward(self, inputs):
        x = self.embedding(inputs)
        x = fluid.layers.reshape(x,
                                 shape=[x.shape[0], 1, x.shape[1], x.shape[2]])
        x = self.conv1d(x)
        x = fluid.layers.pool2d(x, pool_size=(2, 1))
        x = fluid.layers.flatten(x)
        if not self.if_built:
            # build the output layer lazily, once the flattened width is known
            self.fc = fluid.Linear(input_dim=x.shape[1],
                                   output_dim=self.output_dim,
                                   act='softmax')
            self.if_built = True
        x = self.fc(x)
        return x
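
This forward method references self.embedding, self.conv1d, self.if_built, and self.output_dim, so the surrounding class needs an __init__ along these lines. The sketch below is a hypothetical reconstruction; the class name and every layer size are assumptions chosen only to match the attribute names used above:

class LazyFCNet(fluid.dygraph.Layer):  # hypothetical name
    def __init__(self, vocab_size, embed_dim, output_dim):
        super(LazyFCNet, self).__init__()
        self.embedding = fluid.Embedding(size=(vocab_size, embed_dim))
        self.conv1d = fluid.dygraph.Conv2D(1, 4, (3, 1))  # assumed filter shape
        self.output_dim = output_dim
        self.if_built = False  # self.fc is created on the first forward pass
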
Example #13
    def __init__(self, input_size, vocab_size, size, dtype="float32"):
        super(MyLayer2, self).__init__(dtype=dtype)
        self.embed0 = fluid.Embedding(size=(vocab_size, size))
        self.embed1 = fluid.Embedding(size=(vocab_size, size))
        self.linear_0 = fluid.Linear(input_size, size, dtype=dtype)
        self.linear_1 = fluid.Linear(input_size, size, dtype=dtype)
Example #14
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.randn(5, 5))
    y = fluid.dygraph.to_variable(np.random.randn(5, 5))
    z = fluid.dygraph.to_variable(np.random.randn(5, 5))
    z.stop_gradient = False
    # x and y keep the default stop_gradient=True, so a.stop_gradient is True
    a = x + y
    print("a.stop_gradient=%s" % (str(a.stop_gradient)))
    # z requires gradients, so b.stop_gradient is False
    b = a + z
    print("b.stop_gradient=%s" % (str(b.stop_gradient)))
# step 4: automatic pruning, part 2
print("step 4------------------------------")
with fluid.dygraph.guard():
    value0 = np.arange(26).reshape(2, 13).astype("float32")
    value1 = np.arange(6).reshape(2, 3).astype("float32")
    value2 = np.arange(10).reshape(2, 5).astype("float32")
    fc = fluid.Linear(13, 5, dtype="float32")
    fc2 = fluid.Linear(3, 3, dtype="float32")
    a = fluid.dygraph.to_variable(value0)
    b = fluid.dygraph.to_variable(value1)
    c = fluid.dygraph.to_variable(value2)
    out1 = fc(a)
    out2 = fc2(b)
    out1.stop_gradient = True
    out = fluid.layers.concat(input=[out1, out2, c], axis=1)
    print("out = %s" % (str(out.numpy())))
    out.backward()
    print("out.gradient = %s" % (str(out.gradient())))
    print("out.shape = %s" % (str(out.numpy().shape)))
    # the branch through out1 is pruned, so fc's gradient stays zero
    assert (fc.weight.gradient() == 0).all()
    assert (out1.gradient() == 0).all()
Example #15
import paddle.fluid as fluid
import numpy as np

with fluid.dygraph.guard():
    value = np.arange(26).reshape(2, 13).astype("float32")
    print("value", value)
    a = fluid.dygraph.to_variable(value)
    linear = fluid.Linear(13, 1, dtype="float32")
    optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01,
                                             parameter_list=linear.parameters())
    out = linear(a)
    print("out", out)
    out.backward()
    optimizer.minimize(out)
    optimizer.clear_gradients()
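
Example #15 uses the legacy fluid optimizer API (minimize plus clear_gradients). For comparison, here is a minimal sketch of the same update loop in the paddle 2.x dygraph API, in the style of Examples #10 and #11 (assumes paddle >= 2.0):

import numpy as np
import paddle

value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 1)
sgd = paddle.optimizer.SGD(learning_rate=0.01,
                           parameters=linear.parameters())
loss = paddle.mean(linear(a))
loss.backward()
sgd.step()        # apply the gradient update
sgd.clear_grad()  # reset gradients for the next iteration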