Example #1
 def __init__(self):
     super(DoubleForBackprop, self).__init__()
     with self.init_scope():
         self.l = L.Linear(None, 3)
Example #2
    def test_copy_grad(self):
        def set_random_grad(link):
            link.cleargrads()
            x = np.random.normal(size=(1, 1)).astype(np.float32)
            y = link(x) * np.random.normal()
            F.sum(y).backward()

        # When source is not None and target is None
        a = L.Linear(1, 5)
        b = L.Linear(1, 5)
        set_random_grad(a)
        b.cleargrads()
        assert a.W.grad is not None
        assert a.b.grad is not None
        assert b.W.grad is None
        assert b.b.grad is None
        copy_param.copy_grad(target_link=b, source_link=a)
        np.testing.assert_almost_equal(a.W.grad, b.W.grad)
        np.testing.assert_almost_equal(a.b.grad, b.b.grad)
        assert a.W.grad is not b.W.grad
        assert a.b.grad is not b.b.grad

        # When both are not None
        a = L.Linear(1, 5)
        b = L.Linear(1, 5)
        set_random_grad(a)
        set_random_grad(b)
        assert a.W.grad is not None
        assert a.b.grad is not None
        assert b.W.grad is not None
        assert b.b.grad is not None
        copy_param.copy_grad(target_link=b, source_link=a)
        np.testing.assert_almost_equal(a.W.grad, b.W.grad)
        np.testing.assert_almost_equal(a.b.grad, b.b.grad)
        assert a.W.grad is not b.W.grad
        assert a.b.grad is not b.b.grad

        # When source is None and target is not None
        a = L.Linear(1, 5)
        b = L.Linear(1, 5)
        a.cleargrads()
        set_random_grad(b)
        assert a.W.grad is None
        assert a.b.grad is None
        assert b.W.grad is not None
        assert b.b.grad is not None
        copy_param.copy_grad(target_link=b, source_link=a)
        assert a.W.grad is None
        assert a.b.grad is None
        assert b.W.grad is None
        assert b.b.grad is None

        # When both are None
        a = L.Linear(1, 5)
        b = L.Linear(1, 5)
        a.cleargrads()
        b.cleargrads()
        assert a.W.grad is None
        assert a.b.grad is None
        assert b.W.grad is None
        assert b.b.grad is None
        copy_param.copy_grad(target_link=b, source_link=a)
        assert a.W.grad is None
        assert a.b.grad is None
        assert b.W.grad is None
        assert b.b.grad is None
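
The helper exercised above appears to be ChainerRL's copy_param.copy_grad. A minimal sketch consistent with the four scenarios this test checks (the body below is an assumption, not the library's verbatim source):

def copy_grad(target_link, source_link):
    # Copy every parameter gradient from source_link into target_link.
    # A None source gradient makes the corresponding target gradient
    # None as well, matching the third and fourth scenarios above.
    target_params = dict(target_link.namedparams())
    for name, source_param in source_link.namedparams():
        target_param = target_params[name]
        if source_param.grad is None:
            target_param.grad = None
        else:
            # np.copy keeps the two gradient arrays independent,
            # which is what the `is not` assertions verify.
            target_param.grad = np.copy(source_param.grad)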
Example #3
 def __init__(self, n_in, n_units, n_out):
     super(MnistMLP, self).__init__(
         l1=L.Linear(n_in, n_units),
         l2=L.Linear(n_units, n_units),
         l3=L.Linear(n_units, n_out),
     )
Example #4

 def __init__(self, model_params):
     super(DNN, self).__init__(
         l1=L.Linear(model_params['fp_length'], model_params['h1_size']),
         l2=L.Linear(model_params['h1_size'], 1),
         bnorm1=L.BatchNormalization(model_params['h1_size']),
     )
Example #5
 def __init__(self):
     super().__init__()
     init_w = initializers.Normal(0.01)
     init_b = initializers.Constant(0)
     with self.init_scope():
         self.fc1 = L.Linear(None, 31, initialW=init_w, initial_bias=init_b)
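
Both initialW and initial_bias accept chainer.initializers objects; here the weights are drawn from a zero-mean normal with standard deviation 0.01 and the biases start at zero.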
Example #6
 def __init__(self, n_stack):
     super(StackSequence, self).__init__(
         w_xy=links.Linear(n_stack, 4 * n_stack),
         w_yy=links.Linear(n_stack, 4 * n_stack),
     )
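
The 4 * n_stack output size is the standard layout for the four LSTM gate pre-activations. A step built on these links might look like the following sketch (an assumption; the example shows only __init__):

 def step(self, c, y, x):
     # F.lstm splits its second argument into input, forget, cell, and
     # output gate pre-activations internally and returns (c_new, y_new).
     return F.lstm(c, self.w_xy(x) + self.w_yy(y))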
Example #7
 def __init__(self, d_in, h, d_out):
     super(TwoLayerNet, self).__init__(
         linear1=L.Linear(d_in, h,  initialW=W1.transpose().copy()),
         linear2=L.Linear(h, d_out, initialW=W2.transpose().copy())
     )
Example #8
 def __init__(self, n_input, n_layer, n_units, n_output):
     super(SER, self).__init__()
     with self.init_scope():
         self.l1 = L.NStepBiLSTM(n_layer, n_input, n_units, 0.25)
         self.l2 = L.Linear(n_units * 2, n_output)
Example #9
 def __init__(self, obs_size, n_actions, n_hidden_channels=2):
     super(Qfunction, self).__init__()
     with self.init_scope():
         self.l1 = L.Linear(obs_size, n_hidden_channels)
         self.l2 = L.Linear(n_hidden_channels, n_hidden_channels)
         self.l3 = L.Linear(n_hidden_channels, n_actions)
Example #10
 def __init__(self, obs_size, n_actions, n_units=100):
     super(QFunction, self).__init__()
     with self.init_scope():
         self.l0 = L.Linear(obs_size, n_units)
         self.l1 = L.Linear(n_units, n_units)
         self.l2 = L.Linear(n_units, n_actions)
Example #11

 def __init__(self, num_classes, **kwargs):
     super(ResNet50, self).__init__()
     with self.init_scope():
         self.base = L.ResNet50Layers()
         self.fc_1 = L.Linear(None, 1024)
         self.fc_2 = L.Linear(1024, num_classes)
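
A plausible forward pass for this fine-tuning setup pulls the pooled ResNet feature and classifies it with the two fresh layers (a sketch under that assumption; the F.relu activation is illustrative):

 def __call__(self, x):
     # ResNet50Layers can return intermediate activations by name.
     h = self.base(x, layers=['pool5'])['pool5']
     h = F.relu(self.fc_1(h))
     return self.fc_2(h)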
Example #12
 def __init__(self, obs_size, n_actions, n_hidden_channels=50):
     super().__init__()
     with self.init_scope():
         self.l0 = L.Linear(obs_size, n_hidden_channels)
         self.l1 = L.Linear(n_hidden_channels, n_hidden_channels)
         self.l2 = L.Linear(n_hidden_channels, n_actions)
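
This chain matches the ChainerRL quickstart Q-function; there it is completed with a call method along these lines (a sketch, with the tanh activations and the DiscreteActionValue wrapper assumed from that tutorial):

 def __call__(self, x, test=False):
     h = F.tanh(self.l0(x))
     h = F.tanh(self.l1(h))
     return chainerrl.action_value.DiscreteActionValue(self.l2(h))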
Example #13
 def __init__(self, dim, dropout_rate, activate, isR, isBN):
     super(Module, self).__init__()
     with self.init_scope():
         self.x2z = L.Linear(dim, dim)
         self.bn = L.BatchNormalization(dim)
Example #14
 def __init__(self, n_units, n_out):
     super(MLP, self).__init__(l1=L.Linear(None, n_units),
                               l2=L.Linear(None, n_units),
                               l3=L.Linear(None, n_out))
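
Passing None as the input size makes each L.Linear infer it from the first batch it sees, so this MLP works for any input dimensionality. The chain would typically be completed with a forward pass such as the following (the F.relu nonlinearity is an illustrative assumption):

 def __call__(self, x):
     h1 = F.relu(self.l1(x))  # l1's input size is fixed here, on the first call
     h2 = F.relu(self.l2(h1))
     return self.l3(h2)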
Example #15
 def __init__(self):
     super(IrisChain, self).__init__(
         l1=L.Linear(4, 6),
         l2=L.Linear(6, 3),
     )
Example #16
 def test_insert(self):
     l1 = links.Linear(3, 3)
     self.s1.insert(1, l1)
     self.assertEqual(len(self.s1), 3)
     self.assertIs(self.s1[1], l1)
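
Here self.s1 is presumably a chainer.Sequential, which implements Python's MutableSequence interface, so insert, indexing, and len behave exactly as they do for a plain list.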
Example #17
from chainer import links as L
import numpy as np

f = L.Linear(3, 2)
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32)
y = f(x)
print(y.data)
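
With a (4, 3) float32 input batch, y.data is a (4, 2) float32 array; the exact values depend on the randomly initialized W and b.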
Example #18

 def __init__(self, n_in, n_units, n_out):
     super(MyQNet, self).__init__(
         l1=L.Convolution2D(in_channels=2,
                            out_channels=n_units,
                            ksize=3,
                            pad=1),  # 6*6
         l200=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l201=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l202=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l203=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l204=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l205=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l206=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l207=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),
         l208=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),  # added
         l209=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),  # added
         l210=L.Convolution2D(in_channels=n_units,
                              out_channels=n_units,
                              ksize=3,
                              pad=1),  # added
         l211=L.Convolution2D(in_channels=128,
                              out_channels=1,
                              ksize=1,
                              nobias=True),  # unused
         l31=L.Bias(shape=(8 * 8)),  # unused
         l32=L.Linear(n_units * 64, 256, nobias=True),  # added
         l33=L.Linear(256, n_out, nobias=True),  # added
         b01=L.BatchNormalization(size=n_units),
         b02=L.BatchNormalization(size=n_units),
         b03=L.BatchNormalization(size=n_units),
         b04=L.BatchNormalization(size=n_units),
         b05=L.BatchNormalization(size=n_units),
         b06=L.BatchNormalization(size=n_units),
         b07=L.BatchNormalization(size=n_units),
         b08=L.BatchNormalization(size=n_units),
         b09=L.BatchNormalization(size=n_units),
         b10=L.BatchNormalization(size=n_units),
     )
Example #19
 def __init__(self, n_input, n_output):
     super(LabelEstimator,
           self).__init__(w_xy=links.Linear(n_input, n_output))
Example #20
 def __init__(self, n_units, n_out):
     super(MLP, self).__init__()
     with self.init_scope():
         self.l1 = L.Linear(784, n_units)
         self.l2 = L.Linear(n_units, n_units)
         self.l3 = L.Linear(n_units, n_out)
Example #21

 def __init__(self,
              init_ch=6,
              ch=8,
              out_ch=3,
              activation=F.relu,
              distribution="normal",
              batch_size=64,
              dim_z=3,
              bottom_size=32):
     super(Generator, self).__init__()
     initializer = chainer.initializers.GlorotUniform()
     #initializer_u = chainer.initializers.Uniform(scale=1)
     #initializer_v = chainer.initializers.Uniform(scale=1)
     self.activation = activation
     self.distribution = distribution
     self.batch_size = batch_size
     self.dim_z = dim_z
     self.ch = ch
     with self.init_scope():
         # Encoder
         self.enc1 = Block(init_ch,
                           ch,
                           activation=activation,
                           batch_size=batch_size,
                           is_shortcut=True,
                           dim_z=dim_z)
         self.enc2 = Block(ch,
                           ch * 2,
                           activation=activation,
                           batch_size=batch_size,
                           is_shortcut=True,
                           dim_z=dim_z)
         self.enc3 = Block(ch * 2,
                           ch * 2,
                           activation=activation,
                           batch_size=batch_size,
                           is_shortcut=True,
                           dim_z=dim_z)
         self.linear = L.Linear(ch * 2 * (bottom_size * bottom_size),
                                ch * 2 * (bottom_size * bottom_size))
         # WIP: I have not finished implementing this.
         # It is meant to reduce the dimensionality of this linear layer.
         # self.linear = SVDLinear(ch * 4 * (bottom_size * bottom_size), (ch * 4 * (bottom_size * bottom_size)), k=(bottom_size * bottom_size * ch * 4), initialU=initializer_u, initialV=initializer_v)
         self.b4 = L.BatchNormalization(ch * 2 *
                                        (bottom_size * bottom_size))
         self.dec1 = Block(ch * 2,
                           ch * 2,
                           activation=activation,
                           batch_size=batch_size,
                           is_shortcut=False,
                           dim_z=dim_z)
         self.dec2 = Block(ch * 2,
                           ch,
                           activation=activation,
                           batch_size=batch_size,
                           is_shortcut=False,
                           dim_z=dim_z)
         self.b8 = L.BatchNormalization(ch)
         self.l8 = L.Convolution2D(ch,
                                   out_ch,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   initialW=initializer)
Example #22
 def __init__(self, cin, cout):
     super(FCN, self).__init__()
     with self.init_scope():
         self.linear = L.Linear(cin, cout)
         self.bn = L.BatchNormalization(cout)
Example #23
 def __init__(self):
     super().__init__()
     init_w = initializers.Normal(0.005)
     init_b = initializers.Constant(0.1)
     with self.init_scope():
         self.fc = L.Linear(4096, 256, initialW=init_w, initial_bias=init_b)
Example #24
 def __init__(self, n_units, n_out):
     super(MLP, self).__init__()
     with self.init_scope():
         self.l1 = L.Linear(None, n_units)  # n_in -> n_units
         self.l2 = L.Linear(None, n_units)  # n_units -> n_units
         self.l3 = L.Linear(None, n_out)  # n_units -> n_out
Example #25
    def test_soft_copy_param_type_check(self):
        a = L.Linear(None, 5)
        b = L.Linear(1, 5)

        with self.assertRaises(TypeError):
            copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
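
The TypeError is expected here because a = L.Linear(None, 5) is still uninitialized: its W array stays None until the first forward pass, so an in-place soft update cannot write into it. A minimal sketch of the Polyak-style update the helper performs (an assumption, not the library's verbatim source):

def soft_copy_param(target_link, source_link, tau):
    # Blend parameters in place: target <- tau * source + (1 - tau) * target.
    target_params = dict(target_link.namedparams())
    for name, source_param in source_link.namedparams():
        target_param = target_params[name]
        # Writing into target_param.array raises TypeError when the
        # parameter is still uninitialized (array is None), as the
        # test above expects.
        target_param.array[:] = (tau * source_param.array
                                 + (1 - tau) * target_param.array)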
Example #26

 def __init__(self):
     super(IrisChain, self).__init__(
         # Define layers
         l1=L.Linear(4, 3))
Example #27
 def __init__(self, v, k):
     super(MyRNN, self).__init__(
         embed=L.EmbedID(v, k),
         H=L.Linear(k, k),
         W=L.Linear(k, v),
     )
Example #28
 def __init__(self):
     super(MyChain, self).__init__()
     with self.init_scope():
         self.l1 = L.Linear(None, 3)
         self.l2 = L.Linear(None, 4)
Example #29
 def __init__(self, n_hidden, n_out, n_input=None):
     super(BasicGeneratorNetwork, self).__init__()
     with self.init_scope():
         self.l1 = L.Linear(n_input, n_hidden)
         self.l2 = L.BatchNormalization(n_hidden)
         self.l3 = L.Linear(n_hidden, n_out)
Example #30
 def __init__(self):
     super(IrisChain, self).__init__(
         l1=L.Linear(4, 6),  # 4 input features; 6 units in the hidden layer
         l2=L.Linear(6, 3),  # 3 output classes
     )