Example #1
    def test_set_global_bias_initializer(self):
        """Test set_global_initializer's bias_init with NormalInitializer
        """
        main_prog = framework.Program()
        startup_prog = framework.Program()
        fluid.set_global_initializer(initializer.Uniform(low=-0.5, high=0.5),
                                     bias_init=initializer.Normal(loc=0.0,
                                                                  scale=2.0))
        with fluid.program_guard(main_prog, startup_prog):
            x = fluid.data(name="x", shape=[1, 3, 32, 32])
            # default initializer of bias in layers.conv2d is ConstantInitializer
            conv = fluid.layers.conv2d(x, 5, 3)

        block = startup_prog.global_block()
        self.assertEqual(len(block.ops), 2)

        # the bias init is the first op, the weight init is the second
        bias_init_op = block.ops[0]
        self.assertEqual(bias_init_op.type, 'gaussian_random')
        self.assertAlmostEqual(bias_init_op.attr('mean'), 0.0, delta=DELTA)
        self.assertAlmostEqual(bias_init_op.attr('std'), 2.0, delta=DELTA)
        self.assertEqual(bias_init_op.attr('seed'), 0)

        param_init_op = block.ops[1]
        self.assertEqual(param_init_op.type, 'uniform_random')
        self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA)
        self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA)
        self.assertEqual(param_init_op.attr('seed'), 0)
        fluid.set_global_initializer(None)
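
The method above is an excerpt from a unittest.TestCase; the module-level imports, the DELTA tolerance, and the class wrapper it relies on are not shown on this page. A minimal sketch of that assumed context (the class name and the DELTA value are assumptions, not taken from this page):

import unittest

import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.initializer as initializer

DELTA = 1e-5  # assumed tolerance for the assertAlmostEqual checks


class TestSetGlobalInitializer(unittest.TestCase):
    # ... the test method shown above goes here ...
    pass


if __name__ == '__main__':
    unittest.main()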
Example #2
net.add_sublayer('linear', nn.Linear(num_inputs, 1))
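# add_sublayer registers the Linear layer as a named child ('linear') of the
# container net built in the preceding (omitted) lines.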

# Method 3:
# from collections import OrderedDict
# net = nn.Sequential(OrderedDict([
#     ('linear', nn.Linear(num_inputs, 1))
# ]))

print(net)

for param in net.parameters():
    print(param)

# 3.3.4 Initialize model parameters
# Set the global parameter initializers
fluid.set_global_initializer(initializer.Uniform(), initializer.Constant())
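# The second positional argument is bias_init (cf. Example #1): parameters created
# under this setting get Uniform() weights and Constant() (zero-valued) biases.
# As in Example #1, the setting could later be cleared again with
# fluid.set_global_initializer(None).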

# 3.3.5 Define the loss function
loss = nn.MSELoss()

# 3.3.6 Define the optimization algorithm
optimizer = optim.SGD(learning_rate=0.03, parameters=net.parameters())
print(optimizer)

# Set different learning rates for different sub-networks (to be revised)
# optimizer = optim.SGD([
#     {'params': net._sub_layers1.parameters()},
#     {'params': net._sub_layers2.parameters(), 'lr': 0.01}
# ], learning_rate=0.03)

# for param_group in optimizer.param_groups:
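
The commented-out block above follows PyTorch's per-group API, which paddle.optimizer.SGD does not accept in this form. One possible Paddle-side substitute (a sketch only; the 0.1 scale factor, the rebuilt layer, and the num_inputs value are illustrative assumptions, not from the original notebook) is to scale an individual parameter's learning rate through ParamAttr when the layer is created:

import paddle
import paddle.nn as nn

num_inputs = 2  # feature dimension; assumed to match the notebook's earlier definition

# ParamAttr(learning_rate=...) multiplies the optimizer's global learning_rate
# for that parameter, so this weight would train at 0.03 * 0.1 = 0.003.
slow_weight = paddle.ParamAttr(learning_rate=0.1)
slow_linear = nn.Linear(num_inputs, 1, weight_attr=slow_weight)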