Example #1
    def __init__(self,
                 ch=512,
                 ch_in=512,
                 w_ch=512,
                 upsample=True,
                 enable_blur=False):
        super().__init__()
        self.upsample = upsample
        self.ch = ch
        self.ch_in = ch_in
        with self.init_scope():
            if not upsample:
                self.W = chainer.Parameter(shape=(ch_in, 4, 4))
                self.W.data[:] = 1  # w_data_tmp: constant input, initialized to ones

            self.b0 = L.Bias(axis=1, shape=(ch, ))
            self.b1 = L.Bias(axis=1, shape=(ch, ))
            self.n0 = NoiseBlock(ch)
            self.n1 = NoiseBlock(ch)

            self.s0 = StyleBlock(w_ch, ch)
            self.s1 = StyleBlock(w_ch, ch)

            self.c0 = EqualizedConv2d(ch_in, ch, 3, 1, 1, nobias=True)
            self.c1 = EqualizedConv2d(ch, ch, 3, 1, 1, nobias=True)

        self.blur_k = None
        self.enable_blur = enable_blur
Example #2
File: bicnn.py Project: zuacubd/bicnn-mi
 def __init__(self, channels, filter_width, embeddings, k_top, beta, pool_size):
     vocab_size, embed_size = embeddings.shape
     feature_size = [
         pool_size[0][0] * pool_size[0][1],
         pool_size[1][0] * pool_size[1][1] * channels[0],
         pool_size[2][0] * pool_size[2][1] * channels[1],
         pool_size[3][0] * pool_size[3][1] * channels[1],
     ]
     # initialize functions with parameters
     super(BiCNN, self).__init__(
         embed=L.EmbedID(
             in_size=vocab_size,
             out_size=embed_size,
             initialW=embeddings,
         ),
         # convolutions in the first block
         conv1l=self._create_convolution(
             in_channels=1,
             out_channels=channels[0],
             window_size=filter_width[0],
         ),
         conv1r=self._create_convolution(
             in_channels=1,
             out_channels=channels[0],
             window_size=filter_width[0],
         ),
         bias1l=L.Bias(axis=1, shape=(channels[0], ceil(embed_size / 2))),
         bias1r=L.Bias(axis=1, shape=(channels[0], ceil(embed_size / 2))),
         # convolutions in the second block
         conv2l=self._create_convolution(
             in_channels=channels[0],
             out_channels=channels[1],
             window_size=filter_width[1],
         ),
         conv2r=self._create_convolution(
             in_channels=channels[0],
             out_channels=channels[1],
             window_size=filter_width[1],
         ),
         bias2l=L.Bias(axis=1, shape=(channels[1], ceil(embed_size / 4))),
         bias2r=L.Bias(axis=1, shape=(channels[1], ceil(embed_size / 4))),
         # output layer
         linear=L.Linear(
             in_size=sum(feature_size),
             out_size=1,
             bias=0,
         ),
     )
     # retain parameters
     self._channels = channels
     self._filter_width = filter_width
     # self._feature_size = feature_size
     self._vocab_size = vocab_size
     self._embed_size = embed_size
     self._k_top = k_top
     self._beta = beta
     self._pool_size = pool_size
Example #3
File: model.py Project: shinpoi/course
 def __init__(self):
     super(TINY_D_36ch, self).__init__(
         conv1=L.Convolution2D(36,
                               128,
                               ksize=3,
                               stride=1,
                               pad=1,
                               nobias=True),
         bn1=L.BatchNormalization(128, use_beta=False),
         bias1=L.Bias(shape=(128, )),
         # MaxPool(3x3, 2)
         conv2=L.Convolution2D(128,
                               256,
                               ksize=3,
                               stride=1,
                               pad=1,
                               nobias=True),
         bn2=L.BatchNormalization(256, use_beta=False),
         bias2=L.Bias(shape=(256, )),
         # MaxPool(3x3, 2)
         conv3=L.Convolution2D(256,
                               512,
                               ksize=3,
                               stride=1,
                               pad=1,
                               nobias=True),
         bn3=L.BatchNormalization(512, use_beta=False),
         bias3=L.Bias(shape=(512, )),
         # MaxPool(3x3, 2)
         conv4=L.Convolution2D(512,
                               1024,
                               ksize=3,
                               stride=1,
                               pad=1,
                               nobias=True),
         bn4=L.BatchNormalization(1024, use_beta=False),
         bias4=L.Bias(shape=(1024, )),
         conv5=L.Convolution2D(1024,
                               1024,
                               ksize=1,
                               stride=1,
                               pad=0,
                               nobias=True),
         bn5=L.BatchNormalization(1024, use_beta=False),
         bias5=L.Bias(shape=(1024, )),
         # AvgPool(3x3, 2)
         conv6=L.Convolution2D(1024,
                               64,
                               ksize=1,
                               stride=1,
                               pad=0,
                               nobias=True),
         conv7=L.Convolution2D(64, 2, ksize=1, stride=1, pad=0),
     )
Example #4
 def __init__(self, dim):
     super(Denoise, self).__init__(
         a0=L.Scale(W_shape=(dim, )),
         a1=L.Scale(W_shape=(dim, )),
         a2=L.Scale(W_shape=(dim, )),
         a3=L.Bias(shape=(dim, )),
         a4=L.Bias(shape=(dim, )),
         b0=L.Scale(W_shape=(dim, )),
         b1=L.Scale(W_shape=(dim, )),
         b2=L.Scale(W_shape=(dim, )),
         b3=L.Bias(shape=(dim, )),
     )
Example #5
    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
        self.b = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
        self.y_expected = numpy.copy(self.x)
        for i, j, k in numpy.ndindex(self.y_expected.shape):
            self.y_expected[i, j, k] += self.b[j]
        self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)

        axis = 1
        if self.learn_b:
            self.link = links.Bias(axis, self.b.shape)
            self.link.b.data = self.b
        else:
            self.link = links.Bias(axis, None)
        self.link.cleargrads()
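The element-wise loop above builds the expected output by hand; it is exactly a broadcast of b along axis 1. A vectorized restatement of the same expectation (illustrative, same shapes as the test):

import numpy
x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
b = numpy.random.uniform(-1, 1, (2, )).astype(numpy.float32)
y_expected = x + b[None, :, None]  # add b along axis 1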
Example #6
 def __init__(self, blocks=5):
     super(T2Resnet_multi, self).__init__()
     self.blocks = blocks
     with self.init_scope():
         self.l1 = L.Convolution2D(in_channels=104,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         for i in range(1, blocks):
             block_wobn = Block(True)
             block_bn = Block(False)
             self.add_link('b_wobn{}'.format(i), block_wobn)
             self.add_link('b_bn{}'.format(i), block_bn)
         # policy network
         self.policy = L.Convolution2D(
             in_channels=ch,
             out_channels=MOVE_DIRECTION_LABEL_NUM,
             ksize=1,
             nobias=True)
         self.policy_bias = L.Bias(shape=(9 * 9 * MOVE_DIRECTION_LABEL_NUM))
         # value network
         self.value1 = L.Convolution2D(
             in_channels=ch, out_channels=MOVE_DIRECTION_LABEL_NUM, ksize=1)
         self.value1_bn = L.BatchNormalization(MOVE_DIRECTION_LABEL_NUM)
         self.value2 = L.Linear(9 * 9 * MOVE_DIRECTION_LABEL_NUM, fcl)
         self.value3 = L.Linear(fcl, 1)
Example #7
    def __init__(self, in_size=512, out_channel=3):
        super(Generator, self).__init__()
        with self.init_scope():
            self.fc7_1 = L.Linear(in_size, 4 * 4 * 512, nobias=True)
            self.b7_1 = L.Bias(shape=(512, ))
            self.conv7_2 = L.Convolution2D(512, 512, ksize=3, pad=1)

            self.conv6_1 = L.Convolution2D(512, 512, ksize=3, pad=1)
            self.conv6_2 = L.Convolution2D(512, 512, ksize=3, pad=1)

            self.conv5_1 = L.Convolution2D(512, 512, ksize=3, pad=1)
            self.conv5_2 = L.Convolution2D(512, 512, ksize=3, pad=1)

            self.conv4_1 = L.Convolution2D(512, 512, ksize=3, pad=1)
            self.conv4_2 = L.Convolution2D(512, 512, ksize=3, pad=1)

            self.conv3_1 = L.Convolution2D(512, 256, ksize=3, pad=1)
            self.conv3_2 = L.Convolution2D(256, 256, ksize=3, pad=1)

            self.conv2_1 = L.Convolution2D(256, 128, ksize=3, pad=1)
            self.conv2_2 = L.Convolution2D(128, 128, ksize=3, pad=1)

            self.conv1_1 = L.Convolution2D(128, 64, ksize=3, pad=1)
            self.conv1_2 = L.Convolution2D(64, 64, ksize=3, pad=1)

            self.conv0_0 = L.Convolution2D(64, out_channel, ksize=1)
Example #8
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 node_dim,
                 n_hidden_layers=0,
                 activation=functions.relu,
                 dropout=0.0):
        # in_dim should be layer_dim
        # hidden_dim is edge_hidden_dim
        # out_dim should be node_dim ** 2
        super(EdgeNetwork, self).__init__()
        with self.init_scope():
            first_linear_layer = links.Linear(in_size=in_dim,
                                              out_size=hidden_dim)
            rest_linear_layers = [
                links.Linear(in_size=hidden_dim, out_size=hidden_dim)
                for _ in range(n_hidden_layers - 1)
            ]
            hidden_layers = [first_linear_layer] + rest_linear_layers
            self.hidden_layers = chainer.ChainList(*hidden_layers)
            if n_hidden_layers == 0:
                self.output_layer = links.Linear(in_size=in_dim,
                                                 out_size=node_dim**2)
            else:
                self.output_layer = links.Linear(in_size=hidden_dim,
                                                 out_size=node_dim**2)
            self.bias_add_layer = links.Bias(axis=1)

        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.node_dim = node_dim
        self.out_dim = self.node_dim**2
        self.n_hidden_layers = n_hidden_layers
        self.activation = activation
        self.dropout = dropout
Example #9
    def __init__(self,
                 n_features=None,
                 n_dim=8,
                 lossfun=F.mean_squared_error,
                 lambda0=5e-3,
                 lambda1=5e-3,
                 lambda2=5e-3,
                 init_bias=0.0,
                 intx_term=True,
                 total_nobs=1):
        self.n_dim = n_dim
        self.n_features = n_features
        self.lossfun = lossfun
        self.lambda0 = lambda0
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.intx_term = intx_term
        self.total_nobs = total_nobs

        # These are all the learned weights corresponding
        # to the overall bias, slope per feature, and latent
        # interaction vector per feature
        super(FM, self).__init__(bias=L.Bias(shape=(1, )),
                                 slope=L.EmbedID(n_features, 1),
                                 latent=L.EmbedID(n_features, n_dim))

        # Xavier initialize weights
        c = np.sqrt(n_features * n_dim)
        self.latent.W.data[...] = np.random.randn(n_features, n_dim) / c
        d = np.sqrt(n_features)
        self.slope.W.data[...] = np.random.randn(n_features, 1) / d
        self.bias.b.data[...] *= 0.0
        self.bias.b.data[...] += init_bias
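The three links map directly onto the standard factorization-machine prediction: a global bias, a per-feature slope, and pairwise interactions computed with the usual sum-of-squares identity. A hedged sketch of a forward pass these parameters support (fm_predict and ids are illustrative names; the model's actual __call__ is not shown here):

import chainer.functions as F

def fm_predict(model, ids):
    # ids: int32 array of shape (batch, n_active_features)
    vi = model.latent(ids)                    # (batch, n_feat, n_dim)
    si = model.slope(ids)                     # (batch, n_feat, 1)
    linear = F.sum(si, axis=1)                # (batch, 1)
    sq_of_sum = F.sum(vi, axis=1) ** 2        # (batch, n_dim)
    sum_of_sq = F.sum(vi ** 2, axis=1)        # (batch, n_dim)
    intx = 0.5 * F.sum(sq_of_sum - sum_of_sq, axis=1, keepdims=True)
    return model.bias(linear + intx)          # add the learned global bias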
Example #10
    def __init__(self, ch=512, ch_in=512, upsample=True, enable_blur=False):
        super().__init__()
        self.upsample = upsample
        self.ch = ch
        self.ch_in = ch_in
        with self.init_scope():
            self.b0 = L.Bias(axis=1, shape=(ch, ))
            self.b1 = L.Bias(axis=1, shape=(ch, ))
            self.n0 = NoiseBlock(ch)
            self.n1 = NoiseBlock(ch)

            self.c0 = EqualizedConv2d(ch_in, ch, 3, 1, 1, nobias=True)
            self.c1 = EqualizedConv2d(ch, ch, 3, 1, 1, nobias=True)

        self.blur_k = None
        self.enable_blur = enable_blur
Example #11
    def __init__(self,
                 dim_in,
                 dim_hidden,
                 dim_latent,
                 num_layers,
                 num_trans,
                 temperature,
                 num_zsamples=1):
        super(VAE, self).__init__()

        # initialise first encoder and decoder hidden layer separately because
        # the input and output dims differ from the other hidden layers
        self.qlin0 = L.Linear(dim_in, dim_hidden)
        self.plin0 = L.Linear(dim_latent, dim_hidden)
        self._children.append('qlin0')
        self._children.append('plin0')

        for i in range(num_layers - 1):
            # encoder
            layer_name = 'qlin' + str(i + 1)
            setattr(self, layer_name, L.Linear(2 * dim_hidden, dim_hidden))
            self._children.append(layer_name)

            # decoder
            layer_name = 'plin' + str(i + 1)
            setattr(self, layer_name, L.Linear(2 * dim_hidden, dim_hidden))
            self._children.append(layer_name)

        # initialise the encoder and decoder output layer separately because
        # the input and output dims differ from the other hidden layers
        self.qlin_mu = L.Linear(2 * dim_hidden, dim_latent)
        self.qlin_ln_var = L.Linear(2 * dim_hidden, dim_latent)
        self.plin_ln_var = L.Linear(2 * dim_hidden, dim_in)
        self.plin_mu = L.Linear(2 * dim_hidden, dim_in)
        self._children.append('qlin_mu')
        self._children.append('qlin_ln_var')
        self._children.append('plin_mu')
        self._children.append('plin_ln_var')

        # flow
        for i in range(num_trans):
            layer_name = 'flow_w_' + str(i)  # weights
            setattr(self, layer_name,
                    L.Scale(axis=1, W_shape=(dim_latent), bias_term=False))
            self._children.append(layer_name)

            layer_name = 'flow_b_' + str(i)  # bias
            setattr(self, layer_name, L.Bias(axis=0, shape=(1)))
            self._children.append(layer_name)

            layer_name = 'flow_u_' + str(i)  # scaling factor u
            setattr(self, layer_name,
                    L.Scale(axis=1, W_shape=(dim_latent), bias_term=False))
            self._children.append(layer_name)

        self.num_layers = num_layers
        self.num_trans = num_trans
        self.temperature = temperature
        self.num_zsamples = num_zsamples
        self.epochs_seen = 0
Example #12
 def __init__(self, blocks=20):
     super(PolicyValueResnet, self).__init__()
     self.blocks = blocks
     with self.init_scope():
         self.l1 = L.Convolution2D(in_channels=104,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l2 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         for i in range(1, blocks):
             self.add_link('b{}'.format(i), Block())
         # policy network
         self.policy = L.Convolution2D(
             in_channels=ch,
             out_channels=MOVE_DIRECTION_LABEL_NUM,
             ksize=1,
             nobias=True)
         self.policy_bias = L.Bias(shape=(9 * 9 * MOVE_DIRECTION_LABEL_NUM))
         # value network
         self.value1 = L.Convolution2D(
             in_channels=ch, out_channels=MOVE_DIRECTION_LABEL_NUM, ksize=1)
         self.value2 = L.Linear(9 * 9 * MOVE_DIRECTION_LABEL_NUM, fcl)
         self.value3 = L.Linear(fcl, 1)
Example #13
    def __init__(self, blocks=5):
        super(PolicyValueResnet, self).__init__()
        self.blocks = blocks
        with self.init_scope():
            self.l1 = L.Convolution2D(in_channels=104,
                                      out_channels=ch,
                                      ksize=3,
                                      pad=1)
            links = [("root0", RoopBlock(ch, ch))]
            n_in = ch
            n_out = 256
            for index in range(1, 5, 1):
                links += [("root{}".format(index), RoopBlock(n_in, n_out))]
                n_in *= 1
                n_out *= 1

            for link in links:
                self.add_link(*link)
            self.forward = links
            # policy network
            self.policy = L.Convolution2D(
                in_channels=n_in,
                out_channels=MOVE_DIRECTION_LABEL_NUM,
                ksize=1,
                nobias=True)
            self.policy_bias = L.Bias(shape=(9 * 9 * MOVE_DIRECTION_LABEL_NUM))
            # value network
            self.value1 = L.Convolution2D(
                in_channels=n_in,
                out_channels=MOVE_DIRECTION_LABEL_NUM,
                ksize=1)
            self.value2 = L.Linear(9 * 9 * MOVE_DIRECTION_LABEL_NUM, fcl)
            self.value3 = L.Linear(fcl, 1)
Example #14
 def __init__(self, data, name, learnable=False, is_observed=False):
     self._current_value = coerce_to_dtype(data, is_observed)
     self.name = name
     self._observed = is_observed
     self.parents = ()
     self.learnable = learnable
     if learnable:
         self.link = L.Bias(axis=1, shape=self._current_value.shape[1:])
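Here L.Bias is not a network layer at all: it wraps the trailing dimensions of a stored tensor so that, when learnable, reading the value through the link adds a trainable offset. A minimal sketch of that assumed access pattern (not shown in this snippet):

# Assumed: b has shape _current_value.shape[1:] and is broadcast along
# axis 1, letting the optimizer shift the stored value.
value = self.link(self._current_value)  # data + b, with b learned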
Example #15
 def __init__(self):
     super(PolicyNetwork, self).__init__()
     with self.init_scope():
         self.l1 = L.Convolution2D(in_channels=104,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l2 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l3 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l4 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l5 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l6 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l7 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l8 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l9 = L.Convolution2D(in_channels=ch,
                                   out_channels=ch,
                                   ksize=3,
                                   pad=1)
         self.l10 = L.Convolution2D(in_channels=ch,
                                    out_channels=ch,
                                    ksize=3,
                                    pad=1)
         self.l11 = L.Convolution2D(in_channels=ch,
                                    out_channels=ch,
                                    ksize=3,
                                    pad=1)
         self.l12 = L.Convolution2D(in_channels=ch,
                                    out_channels=ch,
                                    ksize=3,
                                    pad=1)
         self.l13 = L.Convolution2D(in_channels=ch,
                                    out_channels=MOVE_DIRECTION_LABEL_NUM,
                                    ksize=1,
                                    nobias=True)
         self.l13_bias = L.Bias(shape=(9 * 9 * MOVE_DIRECTION_LABEL_NUM))
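The final 1x1 convolution is created with nobias=True, and the single L.Bias over the flattened output then gives every (square, move-direction) pair its own bias, which a per-channel convolution bias could not. A hedged sketch of the last step (the network's __call__ is not shown; F is chainer.functions):

h = self.l13(h)  # (batch, MOVE_DIRECTION_LABEL_NUM, 9, 9)
return self.l13_bias(
    F.reshape(h, (-1, 9 * 9 * MOVE_DIRECTION_LABEL_NUM)))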
Example #16
    def __init__(self, n_user: int, n_item: int, n_factor=10):
        super(LinearWeightAdaption, self).__init__()

        # settings
        self.n_user = n_user
        self.n_item = n_item
        self.n_factor = n_factor

        with self.init_scope():
            # architecture
            self.embedId = links.EmbedID(self.n_item, self.n_factor)
            self.linear = links.Convolution2D(1, 1, (1, 2), nobias=True, pad=0)
            self.bias = links.Bias(shape=(1, ))
Example #17
 def __init__(self):
     ksize = 3
     super(SLPolicy, self).__init__()
     with self.init_scope():
         self.block1 = Block(64, ksize)
         self.block2 = Block(128, ksize)
         self.block3 = Block(128, ksize)
         self.block4 = Block(128, ksize)
         self.block5 = Block(128, ksize)
         self.block6 = Block(128, ksize)
         self.block7 = Block(128, ksize)
         self.block8 = Block(128, ksize)
         self.conv9 = L.Convolution2D(128, 1, 1, nobias=True)
         self.bias10 = L.Bias(shape=(64))
Example #18
def deconv(self, variable):
    v = variable
    if (v.creator is not None):
        # convert Convolution2D into the corresponding Deconvolution2D
        if (v.creator.label == 'Convolution2DFunction'):
            print(v.creator.label, v.rank)
            convW = v.creator.inputs[1].data
            in_cn, out_cn = convW.shape[0], convW.shape[1]  # in/out channels
            kh, kw = convW.shape[2], convW.shape[3]  # kernel size
            sx, sy = v.creator.sx, v.creator.sy  # stride
            pw, ph = v.creator.pw, v.creator.ph  # padding

            name = 'conv' + str(v.rank)  # temporary layer name
            super(DeconvNet, self).add_link(
                name,
                L.Deconvolution2D(in_cn,
                                  out_cn, (kh, kw),
                                  stride=(sy, sx),
                                  pad=(ph, pw),
                                  nobias=True,
                                  initialW=convW))
            self.forwards[name] = self[name]
            # if the convolution layer also has a bias, register it as well
            if len(v.creator.inputs) == 3:
                b = v.creator.inputs[2].data
                bname = 'convb' + str(v.rank)
                super(DeconvNet, self).add_link(bname, L.Bias(shape=b.shape))
                self[bname].b.data = b
                self.depends[bname] = (parent)
                self.depends[name] = (bname)
                self.forwards[bname] = self[bname]
                self.layers.append((bname, [parent], name))
            else:
                self.depends[name] = (parent)

        elif (v.creator.label == 'ReLU'):
            name = parent
        elif (v.creator.label == 'MaxPooling2D'):
            kw, kh = v.creator.kw, v.creator.kh
            sx, sy = v.creator.sx, v.creator.sy
            pw, ph = v.creator.pw, v.creator.ph
            name = 'maxpool' + str(v.rank)
            self.depends[name] = (parent)
            self.forwards[name] = lambda x: F.unpooling_2d(
                x, (kh, kw), stride=(sy, sx), pad=(ph, pw))

        self.register_inv_layer(v.creator.inputs[0], name)
    else:
        self.depends['output'] = parent
Example #19
def darknetConv2D(in_channel, out_channel, bn=True):
    if (bn):
        return Chain(
            c=L.Convolution2D(in_channel,
                              out_channel,
                              ksize=3,
                              pad=1,
                              nobias=True),
            n=L.BatchNormalization(out_channel, use_beta=False, eps=0.000001),
            b=L.Bias(shape=[
                out_channel,
            ]),
        )
    else:
        return Chain(
            c=L.Convolution2D(in_channel,
                              out_channel,
                              ksize=3,
                              pad=1,
                              nobias=True),
            b=L.Bias(shape=[
                out_channel,
            ]),
        )
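With use_beta=False the BatchNormalization layer has no shift term, and the separate Bias link restores it after normalization, matching how Darknet stores its weights. A hedged sketch of how such a chain is typically applied (this project's forward helper is not shown; Darknet's leaky ReLU slope of 0.1 is assumed):

import chainer.functions as F

def darknet_forward(block, x, bn=True):
    h = block.c(x)         # nobias convolution
    if bn:
        h = block.n(h)     # batch norm without beta
    return F.leaky_relu(block.b(h), slope=0.1)  # bias restores the shift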
Example #20
 def __init__(self,
              hidden_dim,
              tracking_lstm_hidden_dim,
              num_actions=2,
              make_logits=False):
     super(TrackingLSTM, self).__init__(
         W_x=L.Linear(tracking_lstm_hidden_dim // 2,
                      tracking_lstm_hidden_dim // 2 * 4),
         W_h=L.Linear(tracking_lstm_hidden_dim // 2,
                      tracking_lstm_hidden_dim // 2 * 4),
         bias=L.Bias(axis=1, shape=(tracking_lstm_hidden_dim // 2 * 4, )),
     )
     # TODO: TrackingLSTM should be able to be a size different from hidden_dim.
     if make_logits:
         self.add_link('logits',
                       L.Linear(tracking_lstm_hidden_dim // 2, num_actions))
Example #21
 def __init__(self, n_classes_fcn, n_classes_yolo, n_boxes):
     super(YOLOv2, self).__init__(
         conv1=L.Convolution2D(3, 64, 3, stride=1, pad=1, nobias=True),
         bn1=L.BatchNormalization(64, use_beta=False, eps=2e-5),
         bias1=L.Bias(shape=(64, )),
         conv2=L.Convolution2D(None, 64, 3, stride=1, pad=1, nobias=True),
         bn2=L.BatchNormalization(64, use_beta=False, eps=2e-5),
         bias2=L.Bias(shape=(64, )),
         conv3=L.Convolution2D(None, 128, 3, stride=1, pad=1, nobias=True),
         bn3=L.BatchNormalization(128, use_beta=False, eps=2e-5),
         bias3=L.Bias(shape=(128, )),
         conv4=L.Convolution2D(None, 128, 3, stride=1, pad=1, nobias=True),
         bn4=L.BatchNormalization(128, use_beta=False, eps=2e-5),
         bias4=L.Bias(shape=(128, )),
         conv5=L.Convolution2D(None, 256, 3, stride=1, pad=1, nobias=True),
         bn5=L.BatchNormalization(256, use_beta=False, eps=2e-5),
         bias5=L.Bias(shape=(256, )),
         conv6=L.Convolution2D(None, 256, 3, stride=1, pad=1, nobias=True),
         bn6=L.BatchNormalization(256, use_beta=False, eps=2e-5),
         bias6=L.Bias(shape=(256, )),
         conv7=L.Convolution2D(None, 256, 3, stride=1, pad=1, nobias=True),
         bn7=L.BatchNormalization(256, use_beta=False, eps=2e-5),
         bias7=L.Bias(shape=(256, )),
         pool1=L.Convolution2D(None, n_classes_fcn, 1, stride=1, pad=0),
         upsample3=L.Deconvolution2D(None,
                                     n_classes_fcn,
                                     ksize=16,
                                     stride=8,
                                     pad=4),
         conv14=L.Convolution2D(None, 1024, 3, stride=1, pad=1,
                                nobias=True),
         bn14=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
         bias14=L.Bias(shape=(1024, )),
         conv15=L.Convolution2D(None,
                                n_boxes * (5 + n_classes_yolo),
                                ksize=1,
                                stride=1,
                                pad=0),
     )
     self.n_boxes = n_boxes
     self.n_classes_fcn = n_classes_fcn
     self.n_classes_yolo = n_classes_yolo
     self.finetune = False
Example #22
    def __init__(self, n_features=None, n_dim=8, lossfun=F.mean_squared_error,
                 lambda0=1, lambda1=1, lambda2=1, init_bias_mu=0.0,
                 init_bias_lv=0.0, intx_term=True, total_nobs=1):
        self.n_dim = n_dim
        self.n_features = n_features
        self.lossfun = lossfun
        self.lambda0 = lambda0
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.intx_term = intx_term
        self.total_nobs = total_nobs

        # In contrast to the FM model, the slopes and latent vectors
        # will have means (mu) and log variances (lv) for each component.
        ones_3d = (1, 1, 1)
        super(AutoVFM, self).__init__(bias_mu=L.Bias(shape=(1,)),
                                      bias_lv=L.Bias(shape=(1,)),
                                      slop_mu=L.Bias(shape=(1, 1)),
                                      slop_lv=L.Bias(shape=(1, 1)),
                                      slop_delta_mu=L.EmbedID(n_features, 1,
                                                              ignore_label=-1),
                                      slop_delta_lv=L.EmbedID(n_features, 1,
                                                              ignore_label=-1),
                                      feat_mu_vec=L.Bias(shape=(1, 1, n_dim)),
                                      feat_lv_vec=L.Bias(shape=(1, 1, n_dim)),
                                      hyper_feat_lv_vec=L.Bias(shape=ones_3d),
                                      feat_delta_mu=L.EmbedID(n_features, n_dim,
                                                              ignore_label=-1),
                                      feat_delta_lv=L.EmbedID(n_features, n_dim,
                                                              ignore_label=-1),
                                      hyper_feat_delta_lv=L.Bias(shape=ones_3d))

        # Xavier initialize weights
        c = np.sqrt(n_features * n_dim) * 1e3
        d = np.sqrt(n_features) * 1e3
        self.feat_delta_mu.W.data[...] = np.random.randn(n_features, n_dim) / c
        self.feat_delta_lv.W.data[...] = np.random.randn(n_features, n_dim) / c
        self.slop_delta_mu.W.data[...] = np.random.randn(n_features, 1) / d
        self.slop_delta_lv.W.data[...] = np.random.randn(n_features, 1) / d
        self.bias_mu.b.data[...] *= 0.0
        self.bias_mu.b.data[...] += init_bias_mu
        self.bias_lv.b.data[...] *= 0.0
        self.bias_lv.b.data[...] += init_bias_lv
Example #23
    def get_conv_stack(self,
                       input,
                       output,
                       ksize=3,
                       stride=1,
                       pad=1,
                       nobias=True,
                       use_beta=False):
        conv = L.Convolution2D(input,
                               output,
                               ksize=ksize,
                               stride=stride,
                               pad=pad,
                               nobias=nobias)
        bn = L.BatchNormalization(output, use_beta=use_beta)
        bias = L.Bias(shape=(output, ))

        return conv, bn, bias
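A hedged usage sketch (illustrative only; the caller is expected to register the three returned links and apply them in order):

conv, bn, bias = self.get_conv_stack(3, 32)
h = bias(bn(conv(x)))  # conv -> batch norm (no beta) -> learned bias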
Example #24
    def __init__(self):
        super(PolicyNetwork, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(104, ch, 3, pad=1)
            self.conv1_2 = L.Convolution2D(ch, ch, 3, pad=1)

            self.conv2_1 = L.Convolution2D(ch, ch * 2, 3, pad=1)
            self.conv2_2 = L.Convolution2D(ch * 2, ch * 2, 3, pad=1)

            self.conv3_1 = L.Convolution2D(ch * 2, ch * 4, 3, pad=1)
            self.conv3_2 = L.Convolution2D(ch * 4, ch * 4, 3, pad=1)
            self.conv3_3 = L.Convolution2D(ch * 4, ch * 4, 3, pad=1)
            self.conv3_4 = L.Convolution2D(ch * 4, ch * 4, 3, pad=1)
            self.l13 = L.Convolution2D(in_channels=ch * 4,
                                       out_channels=MOVE_DIRECTION_LABEL_NUM,
                                       ksize=1,
                                       nobias=True)
            self.l13_bias = L.Bias(shape=(9 * 9 * MOVE_DIRECTION_LABEL_NUM))
Example #25
    def __init__(self,
                 hidden_dim,
                 tracking_lstm_hidden_dim,
                 use_external=False,
                 prefix="TreeLSTMChain",
                 gpu=-1):
        super(TreeLSTMChain, self).__init__(
            W_l=L.Linear(hidden_dim // 2, hidden_dim // 2 * 5, nobias=True),
            W_r=L.Linear(hidden_dim // 2, hidden_dim // 2 * 5, nobias=True),
            b=L.Bias(axis=1, shape=(hidden_dim // 2 * 5, )),
        )
        assert hidden_dim % 2 == 0, "hidden_dim must be even because it holds both c and h."
        self.hidden_dim = hidden_dim // 2
        self.__gpu = gpu
        self.__mod = cuda.cupy if gpu >= 0 else np
        self.use_external = use_external

        if use_external:
            self.add_link(
                'W_external',
                L.Linear(tracking_lstm_hidden_dim // 2, hidden_dim // 2 * 5))
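Because W_l and W_r are built with nobias=True, the shared L.Bias adds one bias vector after the two child projections are summed, rather than two redundant ones. A hedged sketch of the combination step (the chain's forward pass is not shown):

# h_left, h_right: (batch, hidden_dim // 2) child hidden states
gates = self.b(self.W_l(h_left) + self.W_r(h_right))  # (batch, hidden_dim // 2 * 5)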
Example #26
 def __init__(self):
     super(MyChain, self).__init__(
         l1=L.Convolution2D(in_channels=None,
                            out_channels=k,
                            ksize=3,
                            pad=1),
         l2=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l3=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l4=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l5=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l6=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l7=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l8=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l9=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l10=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l11=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l12=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l13=L.Convolution2D(in_channels=k,
                             out_channels=len(shogi.PIECE_TYPES),
                             ksize=1,
                             nobias=True),
         l13_2=L.Bias(shape=(9 * 9 * len(shogi.PIECE_TYPES))))
Example #27
 def __init__(self):
     super(PolicyNetwork, self).__init__(
         l1=L.Convolution2D(in_channels=None,
                            out_channels=k,
                            ksize=3,
                            pad=1),
         l2=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l3=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l4=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l5=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l6=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l7=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l8=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l9=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l10=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l11=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l12=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
         l13=L.Convolution2D(in_channels=k,
                             out_channels=MOVE_DIRECTION_LABEL_NUM,
                             ksize=1,
                             nobias=True),
         l13_2=L.Bias(shape=(9 * 9 * MOVE_DIRECTION_LABEL_NUM)))
Example #28
File: net.py Project: chesternimiz/ddgm
 def __init__(self):
     self.n_experts = 128
     self.z_dim = 10
     self.data_dim = 28 * 28
     super(DDGMNet, self).__init__(
         # Generative Model
         gm_linear1=L.Linear(self.z_dim, 128),
         gm_linear2=L.Linear(128, 128),
         gm_linear3=L.Linear(128, self.data_dim),
         # Energy Model
         # prefix energy-model components with em_ so their parameters (and gradients) can be collected separately
         em_linear1=L.Linear(self.data_dim, 128),
         em_linear2=L.Linear(128, 128),
         em_experts=L.Linear(128, self.n_experts),
         em_bias=L.Bias(shape=(self.data_dim, )),
     )
     self.add_param('em_ln_var', tuple())
     self.em_ln_var.data = self.xp.zeros(self.em_ln_var.data.shape,
                                         dtype='float32')
     self.em_params = []
     for k, v in self.namedparams():
         if k.startswith('/em_'): self.em_params.append(v)
Example #29
 def __init__(self, in_channels, out_channels,
              ksize=None, stride=1, pad=0, 
              batch_normalize=False, activation='linear', initialW=None):
     super(Convolution, self).__init__()
     
     self.batch_normalize = batch_normalize
     
     if activation not in ['linear', 'leaky']:
         raise ValueError()
     
     if 'linear' == activation:
         self.activation = F.identity
     elif 'leaky' == activation:
         self.activation = lambda x: F.leaky_relu(x, slope=0.1)
     
     with self.init_scope():
         self.conv = L.Convolution2D(in_channels, out_channels, 
                                     ksize, stride, pad, 
                                     nobias=True,
                                     initialW=initialW)
         if batch_normalize:
             self.bn = L.BatchNormalization(out_channels)
         
         self.b = L.Bias(shape=(out_channels,))
Example #30
    def __init__(self, n_classes, n_boxes):
        super(YOLOv2, self).__init__(
            ##### common layers shared by the pretrained model and YOLOv2 #####
            conv1=L.Convolution2D(3, 32, ksize=3, stride=1, pad=1,
                                  nobias=True),
            bn1=L.BatchNormalization(32, use_beta=False, eps=2e-5),
            bias1=L.Bias(shape=(32, )),
            conv2=L.Convolution2D(32,
                                  64,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  nobias=True),
            bn2=L.BatchNormalization(64, use_beta=False, eps=2e-5),
            bias2=L.Bias(shape=(64, )),
            conv3=L.Convolution2D(64,
                                  128,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  nobias=True),
            bn3=L.BatchNormalization(128, use_beta=False, eps=2e-5),
            bias3=L.Bias(shape=(128, )),
            conv4=L.Convolution2D(128,
                                  64,
                                  ksize=1,
                                  stride=1,
                                  pad=0,
                                  nobias=True),
            bn4=L.BatchNormalization(64, use_beta=False, eps=2e-5),
            bias4=L.Bias(shape=(64, )),
            conv5=L.Convolution2D(64,
                                  128,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  nobias=True),
            bn5=L.BatchNormalization(128, use_beta=False, eps=2e-5),
            bias5=L.Bias(shape=(128, )),
            conv6=L.Convolution2D(128,
                                  256,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  nobias=True),
            bn6=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias6=L.Bias(shape=(256, )),
            conv7=L.Convolution2D(256,
                                  128,
                                  ksize=1,
                                  stride=1,
                                  pad=0,
                                  nobias=True),
            bn7=L.BatchNormalization(128, use_beta=False, eps=2e-5),
            bias7=L.Bias(shape=(128, )),
            conv8=L.Convolution2D(128,
                                  256,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  nobias=True),
            bn8=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias8=L.Bias(shape=(256, )),
            conv9=L.Convolution2D(256,
                                  512,
                                  ksize=3,
                                  stride=1,
                                  pad=1,
                                  nobias=True),
            bn9=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias9=L.Bias(shape=(512, )),
            conv10=L.Convolution2D(512,
                                   256,
                                   ksize=1,
                                   stride=1,
                                   pad=0,
                                   nobias=True),
            bn10=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias10=L.Bias(shape=(256, )),
            conv11=L.Convolution2D(256,
                                   512,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn11=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias11=L.Bias(shape=(512, )),
            conv12=L.Convolution2D(512,
                                   256,
                                   ksize=1,
                                   stride=1,
                                   pad=0,
                                   nobias=True),
            bn12=L.BatchNormalization(256, use_beta=False, eps=2e-5),
            bias12=L.Bias(shape=(256, )),
            conv13=L.Convolution2D(256,
                                   512,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn13=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias13=L.Bias(shape=(512, )),
            conv14=L.Convolution2D(512,
                                   1024,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn14=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
            bias14=L.Bias(shape=(1024, )),
            conv15=L.Convolution2D(1024,
                                   512,
                                   ksize=1,
                                   stride=1,
                                   pad=0,
                                   nobias=True),
            bn15=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias15=L.Bias(shape=(512, )),
            conv16=L.Convolution2D(512,
                                   1024,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn16=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
            bias16=L.Bias(shape=(1024, )),
            conv17=L.Convolution2D(1024,
                                   512,
                                   ksize=1,
                                   stride=1,
                                   pad=0,
                                   nobias=True),
            bn17=L.BatchNormalization(512, use_beta=False, eps=2e-5),
            bias17=L.Bias(shape=(512, )),
            conv18=L.Convolution2D(512,
                                   1024,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn18=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
            bias18=L.Bias(shape=(1024, )),

            ###### new layer
            conv19=L.Convolution2D(1024,
                                   1024,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn19=L.BatchNormalization(1024, use_beta=False),
            bias19=L.Bias(shape=(1024, )),
            conv20=L.Convolution2D(1024,
                                   1024,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn20=L.BatchNormalization(1024, use_beta=False),
            bias20=L.Bias(shape=(1024, )),
            conv21=L.Convolution2D(3072,
                                   1024,
                                   ksize=3,
                                   stride=1,
                                   pad=1,
                                   nobias=True),
            bn21=L.BatchNormalization(1024, use_beta=False),
            bias21=L.Bias(shape=(1024, )),
            conv22=L.Convolution2D(1024,
                                   n_boxes * (5 + n_classes),
                                   ksize=1,
                                   stride=1,
                                   pad=0,
                                   nobias=True),
            bias22=L.Bias(shape=(n_boxes * (5 + n_classes), )),
        )
        self.train = False
        self.finetune = False
        self.n_boxes = n_boxes
        self.n_classes = n_classes