Example 1
 def __init__(self, n_units, n_out, n_worker):
     super(CNN, self).__init__()
     with self.init_scope():
         dim, c_in, c_out = 1, 1, 1
         # arguments: ndim, input channels, output channels, filter size, stride
         self.c1 = L.ConvolutionND(dim,
                                   c_in,
                                   c_out,
                                   n_worker,
                                   stride=n_worker)
         # try out regularization
         self.bn1 = L.BatchNormalization(c_out)
         # the size of the inputs to each layer will be inferred
         self.l1 = L.Linear(None, n_units)  # n_in -> n_units
         #self.l2 = L.Linear(None, n_units)  # n_units -> n_units
         #self.l3 = L.Linear(None, n_units)  # n_units -> n_units
         #self.l4 = L.Linear(None, n_units)  # n_units -> n_units
         self.lout = L.Linear(None, n_out)  # n_units -> n_out
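
The positional arguments used above follow Chainer's L.ConvolutionND signature: (ndim, in_channels, out_channels, ksize, stride, pad). A minimal standalone sketch (not part of the example, with n_worker assumed to be 3) that checks the resulting output shape for the 1-D case:

import numpy as np
import chainer.links as L

# ndim=1, in_channels=1, out_channels=1, ksize=3, stride=3 (mirrors c1 above)
conv = L.ConvolutionND(1, 1, 1, 3, stride=3)
x = np.zeros((2, 1, 12), dtype=np.float32)  # (batch, channels, length)
y = conv(x)
print(y.shape)  # (2, 1, 4): floor((12 - 3) / 3) + 1 output positions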
Example 2
    def __init__(self, n_voc, n_hidden):
        super(Generator, self).__init__()
        self.n_voc = n_voc
        self.n_hidden = n_hidden
        initializer = chainer.initializers.HeNormal()
        with self.init_scope():
            self.l = L.Linear(16 * 16)
            self.deconv = L.DeconvolutionND(1, 16, 32, 7, stride=2, pad=2)

            self.block1 = GenBlock(32, 32, 4, 2, 1)
            self.block2 = GenBlock(32, 64, 4, 2, 1)
            self.block3 = GenBlock(64, 64, 4, 2, 1)
            self.block4 = GenBlock(64, 128, 4, 2, 1)
            self.block5 = DisBlock(128, 128, 5, 2, 0)
            self.block6 = DisBlock(128, 256, 3, 1, 1)

            self.bn = L.BatchNormalization(256)
            self.decoder = L.ConvolutionND(1, 256, n_voc, ksize=1, stride=1)
Example 3
 def __init__(self, base=64):
     super(Refine, self).__init__()
     with self.init_scope():
         self.cbr0 = CBR_3D(3, base, down=True)
         self.cbr1 = CBR_3D(base, base * 2, down=True)
         self.cbr2 = CBR_3D(base * 2, base * 4, down=True)
         self.cbr3 = CBR_3D(base * 4, base * 8, down=True)
         self.cbr4 = CBR_3D(base * 16, base * 4, up=True)
         self.cbr5 = CBR_3D(base * 8, base * 2, up=True)
         self.cbr6 = CBR_3D(base * 4, base, up=True)
         self.cbr7 = CBR_3D(base * 2, base, up=True)
         self.cbr8 = L.ConvolutionND(3,
                                     base,
                                     3,
                                     3,
                                     1,
                                     1,
                                     initialW=initializers.Normal(0.01))
Example 4
    def __init__(self, n_dims, n_actions):
        self.head = links.Sequence(
            L.ConvolutionND(ndim=1,
                            in_channels=n_dims,
                            out_channels=100,
                            ksize=3,
                            stride=1,
                            pad=1,
                            cover_all=True), F.relu)
        self.pi = policies.FCSoftmaxPolicy(n_input_channels=100,
                                           n_actions=n_actions,
                                           n_hidden_layers=2,
                                           n_hidden_channels=100)
        self.v = v_functions.FCVFunction(n_input_channels=100,
                                         n_hidden_layers=2,
                                         n_hidden_channels=100)

        super(A3CFF, self).__init__(self.head, self.pi, self.v)
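
The head convolution above passes cover_all=True. In Chainer, cover_all rounds the output length up so that every input position falls under at least one window. With ksize=3, stride=1, pad=1 it makes no difference, so here is a minimal sketch (independent of the example) of a case where it does:

import numpy as np
import chainer.links as L

x = np.zeros((1, 4, 9), dtype=np.float32)
a = L.ConvolutionND(1, 4, 4, ksize=2, stride=2, cover_all=False)
b = L.ConvolutionND(1, 4, 4, ksize=2, stride=2, cover_all=True)
print(a(x).shape)  # (1, 4, 4): floor((9 - 2) / 2) + 1
print(b(x).shape)  # (1, 4, 5): ceil((9 - 2) / 2) + 1, trailing window is padded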
Example 5
    def __init__(self, maxdisp, gpu0, gpu1, training=True, train_type=None):
        super(PSMNet, self).__init__()
        self.gpu0 = gpu0
        self.gpu1 = gpu1
        self.training = training
        self.train_type = train_type

        with self.init_scope():
            self.maxdisp = maxdisp
            self.feature_extraction = feature_extraction(self.gpu0)

            self.dres0 = Sequential(convbn_3d(64, 32, 3, 1, 1), F.relu,
                                    convbn_3d(32, 32, 3, 1, 1),
                                    F.relu).to_gpu(self.gpu1)

            self.dres1 = Sequential(convbn_3d(32, 32, 3, 1, 1), F.relu,
                                    convbn_3d(32, 32, 3, 1,
                                              1)).to_gpu(self.gpu1)

            self.dres2 = Sequential(convbn_3d(32, 32, 3, 1, 1), F.relu,
                                    convbn_3d(32, 32, 3, 1,
                                              1)).to_gpu(self.gpu1)

            self.dres3 = Sequential(convbn_3d(32, 32, 3, 1, 1), F.relu,
                                    convbn_3d(32, 32, 3, 1,
                                              1)).to_gpu(self.gpu1)

            self.dres4 = Sequential(convbn_3d(32, 32, 3, 1, 1), F.relu,
                                    convbn_3d(32, 32, 3, 1,
                                              1)).to_gpu(self.gpu1)

            self.classify = Sequential(
                convbn_3d(32, 32, 3, 1, 1), F.relu,
                L.ConvolutionND(3,
                                32,
                                1,
                                ksize=3,
                                stride=1,
                                pad=1,
                                nobias=True,
                                initialW=ini.Normal(
                                    math.sqrt(2. / (3 * 3 * 3 * 1))))).to_gpu(
                                        self.gpu1)
Example 6
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=2,
              pooling="f",
              zoneout=False,
              zoneout_ratio=0.1,
              wgain=1):
     self.num_split = len(pooling) + 1
     wstd = kernel_size * out_channels * wgain
     super(QRNN, self).__init__(
         W=links.ConvolutionND(1,
                               in_channels,
                               self.num_split * out_channels,
                               kernel_size,
                               stride=1,
                               pad=kernel_size - 1,
                               initialW=initializers.Normal(wstd)))
     self._in_channels, self._out_channels, self._kernel_size, self._pooling, self._zoneout, self._zoneout_ratio = in_channels, out_channels, kernel_size, pooling, zoneout, zoneout_ratio
     self.reset_state()
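
The convolution above pads by kernel_size - 1, so a length-T input yields T + kernel_size - 1 output steps; a causal QRNN would then typically keep only the first T of them (an assumption here, since the forward pass is not shown). A quick check of the length arithmetic:

import numpy as np
import chainer.links as L

k, T = 2, 8
conv = L.ConvolutionND(1, 3, 6, k, stride=1, pad=k - 1)
x = np.zeros((1, 3, T), dtype=np.float32)
print(conv(x).shape)  # (1, 6, 9): (T + 2*(k-1) - k) + 1 = T + k - 1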
Example 7
    def __init__(self, n_layers, n_dim, n_filters, filter_size, pad_size,
                 n_output, use_bn):
        links = []

        for i_layer in range(n_layers):
            links.append(
                L.ConvolutionND(ndim=n_dim,
                                in_channels=1 if i_layer == 0 else n_filters,
                                out_channels=n_filters,
                                ksize=filter_size,
                                stride=1,
                                pad=pad_size,
                                initialW=chainer.initializers.HeNormal()))
            if use_bn:
                links.append(L.BatchNormalization(n_filters))

        links.append(L.Linear(None, n_output))

        self.n_layers = len(links)
        self.h = {}
        super(CNN_ND, self).__init__(*links)
Example 8
    def __init__(self,
                 ch0,
                 ch1,
                 bn=True,
                 sample='down',
                 activation=F.relu,
                 dropout=False):
        self.bn = bn
        self.activation = activation
        self.dropout = dropout
        layers = {}
        w = chainer.initializers.Normal(0.02)
        if sample == 'down':
            layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)

        elif sample == "dis":
            layers['c'] = L.ConvolutionND(3, ch0, ch1, 4, 2, 1, initialW=w)
        else:
            layers['c'] = L.DeconvolutionND(3, ch0, ch1, 4, 2, 1, initialW=w)
        if bn:
            layers['batchnorm'] = L.BatchNormalization(ch1)
        super(CBR, self).__init__(**layers)
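
All three branches above use the ksize=4, stride=2, pad=1 pattern: the convolutions halve each spatial dimension and the deconvolution doubles it. An illustrative check for the 3-D deconvolution (not taken from the example):

import numpy as np
import chainer.links as L

up = L.DeconvolutionND(3, 8, 8, 4, 2, 1)  # ndim, in_ch, out_ch, ksize, stride, pad
x = np.zeros((1, 8, 5, 5, 5), dtype=np.float32)
print(up(x).shape)  # (1, 8, 10, 10, 10): (5 - 1) * 2 + 4 - 2 * 1 = 10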
Example 9
 def __init__(self,
              in_size,
              out_size,
              kernel_size=2,
              attention=False,
              decoder=False):
     if kernel_size == 1:
         super().__init__(W=Linear(in_size, 3 * out_size))
     elif kernel_size == 2:
         super().__init__(W=Linear(in_size, 3 * out_size, nobias=True),
                          V=Linear(in_size, 3 * out_size))
     else:
         super().__init__(conv=L.ConvolutionND(1,
                                               in_size,
                                               3 * out_size,
                                               kernel_size,
                                               stride=1,
                                               pad=kernel_size - 1))
     if attention:
         self.add_link('U', Linear(out_size, 3 * in_size))
         self.add_link('o', Linear(2 * out_size, out_size))
     self.in_size, self.size, self.attention = in_size, out_size, attention
     self.kernel_size = kernel_size
Example 10
 def __init__(self, in_channels, out_channels, ksize, stride, pad):
     super(DisBlock, self).__init__()
     with self.init_scope():
         self.bn = L.BatchNormalization(in_channels)
         self.conv = L.ConvolutionND(1, in_channels, out_channels, ksize,
                                     stride, pad)
Example 11
    def __init__(self, num_of_label):
        w = chainer.initializers.HeNormal()
        super(UNet3D, self).__init__()
        with self.init_scope():
            # encoder pass
            self.ce0 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3, pad=1, initialW=w)
            self.bne0 = L.BatchNormalization(16)
            self.ce1 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=1, initialW=w)
            self.bne1 = L.BatchNormalization(32)

            self.ce2 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=w)
            self.bne2 = L.BatchNormalization(32)
            self.ce3 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3, pad=1, initialW=w)
            self.bne3 = L.BatchNormalization(64)

            self.ce4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=w)
            self.bne4 = L.BatchNormalization(64)

            # decoder pass
            self.cd4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=128, ksize=3, pad=1, initialW=w)
            self.bnd4 = L.BatchNormalization(128)
            self.deconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2, initialW=w, nobias=True)

            self.cd3 = L.ConvolutionND(ndim=3, in_channels=64+128, out_channels=64, ksize=3, pad=1, initialW=w)
            self.bnd3 = L.BatchNormalization(64)
            self.cd2 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=w)
            self.bnd2 = L.BatchNormalization(64)
            self.deconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2, initialW=w, nobias=True)

            self.cd1 = L.ConvolutionND(ndim=3, in_channels=32+64, out_channels=32, ksize=3, pad=1, initialW=w)
            self.bnd1 = L.BatchNormalization(32)
            self.cd0 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=w)
            self.bnd0 = L.BatchNormalization(32)
            self.lcl = L.ConvolutionND(ndim=3, in_channels=32, out_channels=num_of_label, ksize=1, pad=0, initialW=w)
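
The decoder convolutions with in_channels such as 64+128 imply skip connections: the corresponding encoder feature map is concatenated with the upsampled decoder map along the channel axis before the convolution (assumed, since the forward pass is not shown). A minimal sketch of that concatenation:

import numpy as np
import chainer.functions as F

enc = np.zeros((1, 64, 16, 16, 16), dtype=np.float32)   # encoder feature map
dec = np.zeros((1, 128, 16, 16, 16), dtype=np.float32)  # upsampled decoder map
skip = F.concat([enc, dec], axis=1)
print(skip.shape)  # (1, 192, 16, 16, 16) -> feeds cd3 with in_channels=64+128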
Example 12
    def __init__(self,
                 mask,
                 channels: Sequence[int],
                 n_conv_per_block,
                 pca_dim=1,
                 init_n_blocks=None,
                 with_reconstruction_loss=True,
                 with_pca=False,
                 with_pca_loss=False,
                 debug=False):
        super().__init__()
        try:
            assert len(channels) >= 2
        except AssertionError as e:
            print("len(channels) must be equal or more than 2")
            raise e

        if init_n_blocks is None:
            init_n_blocks = len(channels)

        self.mask = mask
        self.channels = channels
        self.tmp_n_blocks = init_n_blocks
        self.n_blocks = len(self.channels)
        self.n_conv_per_block = n_conv_per_block
        self.loss_const = self.mask.size / self.mask.sum()

        self.decoder_backprop_enabled = [True] * self.n_blocks
        self.encoder_backprop_enabled = [True] * self.n_blocks

        self.pca_attached = with_pca
        self.pca_block_idx = self.tmp_n_blocks - 1

        self.pca_dim = pca_dim

        self.pca_loss_attached = with_pca_loss
        self.reconstruction_loss_attached = with_reconstruction_loss

        self.debug = debug

        with self.init_scope():
            # ConvolutionND(dim, in_channel, out_channel, kernel, stride, padding)
            for block_idx in range(self.n_blocks):
                for conv_idx in range(self.n_conv_per_block):
                    if conv_idx == 0:
                        in_channel = 1 if block_idx == 0 else self.channels[
                            block_idx - 1]
                    else:
                        in_channel = self.channels[block_idx]
                    out_channel = self.channels[block_idx]
                    nobias = False if block_idx == 0 and conv_idx == 0 else True  # the input layer keeps its bias (nobias=False)
                    self.add_link(
                        "conv_{}_{}".format(block_idx, conv_idx),
                        L.ConvolutionND(3,
                                        in_channel,
                                        out_channel, (3, 3, 3),
                                        1,
                                        1,
                                        nobias=nobias))
            # self.pca = PCA(self.channels[self.pca_block_idx], self.pca_dim)
            self.pca = PCA(self.pca_dim)
            for block_idx in reversed(range(self.n_blocks)):
                for conv_idx in range(self.n_conv_per_block):
                    in_channel = self.channels[block_idx]
                    if conv_idx == self.n_conv_per_block - 1:
                        out_channel = 1 if block_idx == 0 else self.channels[
                            block_idx - 1]
                    else:
                        out_channel = self.channels[block_idx]
                    nobias = False if block_idx == 0 and conv_idx == self.n_conv_per_block - 1 else True  # the output layer keeps its bias (nobias=False)
                    self.add_link(
                        "dcnv_{}_{}".format(block_idx, conv_idx),
                        L.ConvolutionND(3,
                                        in_channel,
                                        out_channel, (3, 3, 3),
                                        1,
                                        1,
                                        nobias=nobias))
Example 13
 def __init__(self, cin, cout, k, s, p, batch_norm=True):
     super(Conv3d, self).__init__()
     with self.init_scope():
         self.conv = L.ConvolutionND(3, cin, cout, ksize=k, stride=s, pad=p)
         self.bn = L.BatchNormalization(cout) if batch_norm else None
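
The original forward pass for this block is not shown; a hypothetical conv -> optional batch norm -> ReLU ordering, which the link layout suggests, might look like this (illustrative stand-in class, not the author's code):

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

class Conv3dSketch(chainer.Chain):  # hypothetical stand-in for the block above
    def __init__(self, cin, cout, k, s, p, batch_norm=True):
        super(Conv3dSketch, self).__init__()
        with self.init_scope():
            self.conv = L.ConvolutionND(3, cin, cout, ksize=k, stride=s, pad=p)
            self.bn = L.BatchNormalization(cout) if batch_norm else None

    def __call__(self, x):
        h = self.conv(x)
        if self.bn is not None:
            h = self.bn(h)
        return F.relu(h)

x = np.zeros((1, 1, 8, 8, 8), dtype=np.float32)
print(Conv3dSketch(1, 4, 3, 1, 1)(x).shape)  # (1, 4, 8, 8, 8)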
Example 14
 def __init__(self, in_channels=1, n_classes=4):
     init = chainer.initializers.HeNormal(scale=0.01)
     super(VoxResNet,
           self).__init__(conv1a=L.ConvolutionND(3,
                                                 in_channels,
                                                 32,
                                                 3,
                                                 pad=1,
                                                 initialW=init),
                          bnorm1a=L.BatchNormalization(32),
                          conv1b=L.ConvolutionND(3,
                                                 32,
                                                 32,
                                                 3,
                                                 pad=1,
                                                 initialW=init),
                          bnorm1b=L.BatchNormalization(32),
                          conv1c=L.ConvolutionND(3,
                                                 32,
                                                 64,
                                                 3,
                                                 stride=2,
                                                 pad=1,
                                                 initialW=init),
                          voxres2=VoxResModule(),
                          voxres3=VoxResModule(),
                          bnorm3=L.BatchNormalization(64),
                          conv4=L.ConvolutionND(3,
                                                64,
                                                64,
                                                3,
                                                stride=2,
                                                pad=1,
                                                initialW=init),
                          voxres5=VoxResModule(),
                          voxres6=VoxResModule(),
                          bnorm6=L.BatchNormalization(64),
                          conv7=L.ConvolutionND(3,
                                                64,
                                                64,
                                                3,
                                                stride=2,
                                                pad=1,
                                                initialW=init),
                          voxres8=VoxResModule(),
                          voxres9=VoxResModule(),
                          c1deconv=L.DeconvolutionND(3,
                                                     32,
                                                     32,
                                                     3,
                                                     pad=1,
                                                     initialW=init),
                          c1conv=L.ConvolutionND(3,
                                                 32,
                                                 n_classes,
                                                 3,
                                                 pad=1,
                                                 initialW=init),
                          c2deconv=L.DeconvolutionND(3,
                                                     64,
                                                     64,
                                                     4,
                                                     stride=2,
                                                     pad=1,
                                                     initialW=init),
                          c2conv=L.ConvolutionND(3,
                                                 64,
                                                 n_classes,
                                                 3,
                                                 pad=1,
                                                 initialW=init),
                          c3deconv=L.DeconvolutionND(3,
                                                     64,
                                                     64,
                                                     6,
                                                     stride=4,
                                                     pad=1,
                                                     initialW=init),
                          c3conv=L.ConvolutionND(3,
                                                 64,
                                                 n_classes,
                                                 3,
                                                 pad=1,
                                                 initialW=init),
                          c4deconv=L.DeconvolutionND(3,
                                                     64,
                                                     64,
                                                     10,
                                                     stride=8,
                                                     pad=1,
                                                     initialW=init),
                          c4conv=L.ConvolutionND(3,
                                                 64,
                                                 n_classes,
                                                 3,
                                                 pad=1,
                                                 initialW=init))
Example 15
    def __init__(self, ch=64):
        w = chainer.initializers.Normal(scale=0.02)  # inspired by DCGAN
        super(Discriminator, self).__init__()
        with self.init_scope():
            self.conv1 = L.ConvolutionND(ndim=3,
                                         in_channels=1,
                                         out_channels=ch,
                                         ksize=3,
                                         stride=1,
                                         pad=1,
                                         initialW=w)

            self.conv2 = L.ConvolutionND(ndim=3,
                                         in_channels=ch,
                                         out_channels=ch,
                                         ksize=3,
                                         stride=2,
                                         pad=1,
                                         initialW=w)
            self.bn1 = L.BatchNormalization(ch)

            self.conv3 = L.ConvolutionND(ndim=3,
                                         in_channels=ch,
                                         out_channels=ch * 2,
                                         ksize=3,
                                         stride=1,
                                         pad=1,
                                         initialW=w)
            self.bn2 = L.BatchNormalization(ch * 2)

            self.conv4 = L.ConvolutionND(ndim=3,
                                         in_channels=ch * 2,
                                         out_channels=ch * 2,
                                         ksize=3,
                                         stride=2,
                                         pad=1,
                                         initialW=w)
            self.bn3 = L.BatchNormalization(ch * 2)

            self.conv5 = L.ConvolutionND(ndim=3,
                                         in_channels=ch * 2,
                                         out_channels=ch * 4,
                                         ksize=3,
                                         stride=1,
                                         pad=1,
                                         initialW=w)
            self.bn4 = L.BatchNormalization(ch * 4)

            self.conv6 = L.ConvolutionND(ndim=3,
                                         in_channels=ch * 4,
                                         out_channels=ch * 4,
                                         ksize=3,
                                         stride=2,
                                         pad=1,
                                         initialW=w)
            self.bn5 = L.BatchNormalization(ch * 4)

            self.conv7 = L.ConvolutionND(ndim=3,
                                         in_channels=ch * 4,
                                         out_channels=ch * 8,
                                         ksize=3,
                                         stride=1,
                                         pad=1,
                                         initialW=w)
            self.bn6 = L.BatchNormalization(ch * 8)

            self.conv8 = L.ConvolutionND(ndim=3,
                                         in_channels=ch * 8,
                                         out_channels=ch * 8,
                                         ksize=3,
                                         stride=2,
                                         pad=1,
                                         initialW=w)
            self.bn7 = L.BatchNormalization(ch * 8)

            self.fc1 = L.Linear(None, ch * 16, initialW=w)
            self.fc2 = L.Linear(ch * 16, 1, initialW=w)
Example 16
    def __init__(self,
                 n_dims,
                 in_channels,
                 hidden_channels,
                 out_channel,
                 kernel_size=3,
                 initialW=initializers.HeNormal(),
                 initial_bias=None,
                 block_type='default',
                 is_residual=False,
                 batch_norm=False):

        self.n_dims = n_dims
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channel = out_channel
        self.kernel_size = kernel_size
        self.initialW = initialW
        self.initial_bias = initial_bias
        self.block_type = block_type
        self.is_residual = is_residual
        self.batch_norm = batch_norm

        pad = self.kernel_size // 2 if self.kernel_size % 2 == 0 else (
            self.kernel_size - 1) // 2

        super().__init__()

        with self.init_scope():

            if self.block_type == 'default':
                self.conv_1 = L.ConvolutionND(self.n_dims,
                                              self.in_channels,
                                              self.hidden_channels,
                                              self.kernel_size,
                                              stride=1,
                                              pad=pad,
                                              initialW=self.initialW,
                                              initial_bias=self.initial_bias)
                self.conv_2 = L.ConvolutionND(self.n_dims,
                                              self.hidden_channels,
                                              self.out_channel,
                                              self.kernel_size,
                                              stride=1,
                                              pad=pad,
                                              initialW=self.initialW,
                                              initial_bias=self.initial_bias)

            elif self.block_type == 'dilated':
                assert self.n_dims == 2, 'Currently, dilated convolution is unsupported in 3D.'  # DilatedConvolution2D is 2-D only
                self.conv_1 = L.DilatedConvolution2D(
                    self.in_channels,
                    self.hidden_channels,
                    self.kernel_size,
                    stride=1,
                    pad=pad,
                    dilate=1,
                    initialW=self.initialW,
                    initial_bias=self.initial_bias)
                self.conv_2 = L.DilatedConvolution2D(
                    self.hidden_channels,
                    self.out_channel,
                    self.kernel_size,
                    stride=1,
                    pad=pad * 2,
                    dilate=2,
                    initialW=self.initialW,
                    initial_bias=self.initial_bias)

            elif self.block_type == 'mlp':
                assert self.n_dims == 2, 'Currently, mlp convolution is unsupported in 3D.'  # MLPConvolution2D is 2-D only
                self.conv_1 = L.MLPConvolution2D(self.in_channels,
                                                 [self.hidden_channels] * 3,
                                                 self.kernel_size,
                                                 stride=1,
                                                 pad=pad,
                                                 conv_init=self.initialW,
                                                 bias_init=self.initial_bias)
                self.conv_2 = L.MLPConvolution2D(self.hidden_channels,
                                                 [self.out_channel] * 3,
                                                 self.kernel_size,
                                                 stride=1,
                                                 pad=pad,
                                                 conv_init=self.initialW,
                                                 bias_init=self.initial_bias)

            if self.batch_norm:
                self.bn_conv_1 = L.BatchNormalization(self.hidden_channels)
                self.bn_conv_2 = L.BatchNormalization(self.out_channel)
Example 17
 def __init__(self,
              ndim=3,
              n_class=2,
              init_channel=2,
              kernel_size=3,
              pool_size=2,
              ap_factor=2,
              gpu=-1,
              class_weight=np.array([1, 1]).astype(np.float32),
              loss_func='F.softmax_cross_entropy'):
     self.gpu = gpu
     self.pool_size = pool_size
     if gpu >= 0:
         self.class_weight = cuda.to_gpu(
             np.array(class_weight).astype(np.float32))
     else:
         self.class_weight = np.array(class_weight).astype(np.float32)
     self.train = True
     self.loss_func = loss_func
     initializer = chainer.initializers.HeNormal()
     super(Model_L4, self).__init__(
         c0=L.ConvolutionND(ndim,
                            1,
                            init_channel,
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c1=L.ConvolutionND(ndim,
                            init_channel,
                            int(init_channel * (ap_factor**1)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c2=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**1)),
                            int(init_channel * (ap_factor**1)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c3=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**1)),
                            int(init_channel * (ap_factor**2)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c4=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**2)),
                            int(init_channel * (ap_factor**2)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c5=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**2)),
                            int(init_channel * (ap_factor**3)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c6=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**3)),
                            int(init_channel * (ap_factor**3)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c7=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**3)),
                            int(init_channel * (ap_factor**4)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c8=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**4)),
                            int(init_channel * (ap_factor**4)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         c9=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor**4)),
                            int(init_channel * (ap_factor**5)),
                            kernel_size,
                            1,
                            int(kernel_size / 2),
                            initialW=initializer,
                            initial_bias=None),
         dc0=L.DeconvolutionND(ndim,
                               int(init_channel * (ap_factor**5)),
                               int(init_channel * (ap_factor**5)),
                               self.pool_size,
                               self.pool_size,
                               0,
                               initialW=initializer,
                               initial_bias=None),
         dc1=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor**4) +
                                 init_channel * (ap_factor**5)),
                             int(init_channel * (ap_factor**4)),
                             kernel_size,
                             1,
                             int(kernel_size / 2),
                             initialW=initializer,
                             initial_bias=None),
         dc2=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor**4)),
                             int(init_channel * (ap_factor**4)),
                             kernel_size,
                             1,
                             int(kernel_size / 2),
                             initialW=initializer,
                             initial_bias=None),
         dc3=L.DeconvolutionND(ndim,
                               int(init_channel * (ap_factor**4)),
                               int(init_channel * (ap_factor**4)),
                               self.pool_size,
                               self.pool_size,
                               0,
                               initialW=initializer,
                               initial_bias=None),
         dc4=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor**3) +
                                 init_channel * (ap_factor**4)),
                             int(init_channel * (ap_factor**3)),
                             kernel_size,
                             1,
                             int(kernel_size / 2),
                             initialW=initializer,
                             initial_bias=None),
         dc5=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor**3)),
                             int(init_channel * (ap_factor**3)),
                             kernel_size,
                             1,
                             int(kernel_size / 2),
                             initialW=initializer,
                             initial_bias=None),
         dc6=L.DeconvolutionND(ndim,
                               int(init_channel * (ap_factor**3)),
                               int(init_channel * (ap_factor**3)),
                               self.pool_size,
                               self.pool_size,
                               0,
                               initialW=initializer,
                               initial_bias=None),
         dc7=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor**2) +
                                 init_channel * (ap_factor**3)),
                             int(init_channel * (ap_factor**2)),
                             kernel_size,
                             1,
                             int(kernel_size / 2),
                             initialW=initializer,
                             initial_bias=None),
         dc8=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor**2)),
                             int(init_channel * (ap_factor**2)),
                             kernel_size,
                             1,
                             int(kernel_size / 2),
                             initialW=initializer,
                             initial_bias=None),
         dc9=L.DeconvolutionND(ndim,
                               int(init_channel * (ap_factor**2)),
                               int(init_channel * (ap_factor**2)),
                               self.pool_size,
                               self.pool_size,
                               0,
                               initialW=initializer,
                               initial_bias=None),
         dc10=L.ConvolutionND(ndim,
                              int(init_channel * (ap_factor**1) +
                                  init_channel * (ap_factor**2)),
                              int(init_channel * (ap_factor**1)),
                              kernel_size,
                              1,
                              int(kernel_size / 2),
                              initialW=initializer,
                              initial_bias=None),
         dc11=L.ConvolutionND(ndim,
                              int(init_channel * (ap_factor**1)),
                              int(init_channel * (ap_factor**1)),
                              kernel_size,
                              1,
                              int(kernel_size / 2),
                              initialW=initializer,
                              initial_bias=None),
         dc12=L.ConvolutionND(ndim,
                              int(init_channel * (ap_factor**1)),
                              n_class,
                              1,
                              1,
                              initialW=initializer,
                              initial_bias=None),
         bnc0=L.BatchNormalization(init_channel),
         bnc1=L.BatchNormalization(int(init_channel * (ap_factor**1))),
         bnc2=L.BatchNormalization(int(init_channel * (ap_factor**1))),
         bnc3=L.BatchNormalization(int(init_channel * (ap_factor**2))),
         bnc4=L.BatchNormalization(int(init_channel * (ap_factor**2))),
         bnc5=L.BatchNormalization(int(init_channel * (ap_factor**3))),
         bnc6=L.BatchNormalization(int(init_channel * (ap_factor**3))),
         bnc7=L.BatchNormalization(int(init_channel * (ap_factor**4))),
         bnc8=L.BatchNormalization(int(init_channel * (ap_factor**4))),
         bnc9=L.BatchNormalization(int(init_channel * (ap_factor**5))),
         bndc1=L.BatchNormalization(int(init_channel * (ap_factor**4))),
         bndc2=L.BatchNormalization(int(init_channel * (ap_factor**4))),
         bndc4=L.BatchNormalization(int(init_channel * (ap_factor**3))),
         bndc5=L.BatchNormalization(int(init_channel * (ap_factor**3))),
         bndc7=L.BatchNormalization(int(init_channel * (ap_factor**2))),
         bndc8=L.BatchNormalization(int(init_channel * (ap_factor**2))),
         bndc10=L.BatchNormalization(int(init_channel * (ap_factor**1))),
         bndc11=L.BatchNormalization(int(init_channel * (ap_factor**1))))
Example 18
 def __init__(self):
     super(_CRF, self).__init__(L.ConvolutionND(1, 2, 2, 1, nobias=True))
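
With ksize=1, the convolution above is just a per-position 2x2 linear map over the two score channels, applied independently at every step of the sequence; a brief illustration (not part of the example):

import numpy as np
import chainer.links as L

crf_like = L.ConvolutionND(1, 2, 2, 1, nobias=True)
scores = np.random.randn(4, 2, 10).astype(np.float32)  # (batch, channels, steps)
print(crf_like(scores).shape)  # (4, 2, 10): ksize=1 leaves the length unchanged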
Example 19
    def __init__(self, in_channel, n_classes):
        self.in_channel = in_channel
        super(UNet3DBN, self).__init__(
            c0=L.ConvolutionND(3,
                               self.in_channel,
                               32,
                               3,
                               1,
                               1,
                               initial_bias=None),
            c1=L.ConvolutionND(3, 32, 64, 3, 1, 1, initial_bias=None),
            c2=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
            c3=L.ConvolutionND(3, 64, 128, 3, 1, 1, initial_bias=None),
            c4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
            c5=L.ConvolutionND(3, 128, 256, 3, 1, 1, initial_bias=None),
            c6=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
            c7=L.ConvolutionND(3, 256, 512, 3, 1, 1, initial_bias=None),

            # c8=L.ConvolutionND(3, 512, 512, 3, 1, 1, initial_bias=None),
            dc9=L.DeconvolutionND(3, 512, 512, 2, 2, initial_bias=None),
            dc8=L.ConvolutionND(3, 256 + 512, 256, 3, 1, 1, initial_bias=None),
            dc7=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
            dc6=L.DeconvolutionND(3, 256, 256, 2, 2, initial_bias=None),
            dc5=L.ConvolutionND(3, 128 + 256, 128, 3, 1, 1, initial_bias=None),
            dc4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
            dc3=L.DeconvolutionND(3, 128, 128, 2, 2, initial_bias=None),
            dc2=L.ConvolutionND(3, 64 + 128, 64, 3, 1, 1, initial_bias=None),
            dc1=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
            dc0=L.ConvolutionND(3, 64, n_classes, 1, 1, initial_bias=None),
            bnc0=L.BatchNormalization(32),
            bnc1=L.BatchNormalization(64),
            bnc2=L.BatchNormalization(64),
            bnc3=L.BatchNormalization(128),
            bnc4=L.BatchNormalization(128),
            bnc5=L.BatchNormalization(256),
            bnc6=L.BatchNormalization(256),
            bnc7=L.BatchNormalization(512),
            # bnc8=L.BatchNormalization(512),

            # bnd9=L.BatchNormalization(512),
            bnd8=L.BatchNormalization(256),
            bnd7=L.BatchNormalization(256),
            # bnd6=L.BatchNormalization(256),
            bnd5=L.BatchNormalization(128),
            bnd4=L.BatchNormalization(128),
            # bnd3=L.BatchNormalization(128),
            bnd2=L.BatchNormalization(64),
            bnd1=L.BatchNormalization(64))
        self.train = True
Example 20
    def __init__(self,
                 in_size=[1, 2],
                 encoder_n_units=10,
                 regression_n_units=10,
                 common_out_size=10):
        super(EncoderRegressionModel, self).__init__()
        with self.init_scope():
            initial_w = initializers.HeNormal()
            self.common_out_size = common_out_size

            # Encoder 1
            self.encoder11 = links.Linear(in_size[0],
                                          encoder_n_units,
                                          initialW=initial_w)
            self.encoder12 = links.Linear(encoder_n_units,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.encoder13 = links.Linear(encoder_n_units,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.encoder14 = links.Linear(encoder_n_units,
                                          common_out_size,
                                          initialW=initial_w)

            # Encoder 2
            self.encoder21 = links.Linear(in_size[1],
                                          encoder_n_units,
                                          initialW=initial_w)
            self.encoder22 = links.Linear(encoder_n_units,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.encoder23 = links.Linear(encoder_n_units,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.encoder24 = links.Linear(encoder_n_units,
                                          common_out_size,
                                          initialW=initial_w)

            # Decoder 1
            self.decoder11 = links.Linear(common_out_size,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.decoder12 = links.Linear(encoder_n_units,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.decoder13 = links.Linear(encoder_n_units,
                                          encoder_n_units,
                                          initialW=initial_w)
            self.decoder14 = links.Linear(encoder_n_units,
                                          in_size[0],
                                          initialW=initial_w)

            # Regression 2
            self.conv = links.ConvolutionND(ndim=1,
                                            in_channels=1,
                                            out_channels=regression_n_units,
                                            ksize=common_out_size,
                                            stride=1,
                                            pad=0,
                                            initialW=initial_w)
            self.fc = links.Linear(encoder_n_units, 1)
Example 21
    def __init__(self, label):
        super(UNet3D, self).__init__()
        with self.init_scope():
            self.conv1 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3)
            self.conv2 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3)

            self.conv3 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3)
            self.conv4 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3)

            self.conv5 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3)
            self.conv6 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=128, ksize=3)

            self.conv7 = L.ConvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=3)
            self.conv8 = L.ConvolutionND(ndim=3, in_channels=128, out_channels=256, ksize=3)

            self.dconv1 = L.DeconvolutionND(ndim=3, in_channels=256, out_channels=256, ksize=2, stride=2)
            self.conv9 = L.ConvolutionND(ndim=3, in_channels=128 + 256, out_channels=128, ksize=3)
            self.conv10 = L.ConvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=3)

            self.dconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2)
            self.conv11 = L.ConvolutionND(ndim=3, in_channels=64 + 128, out_channels=64, ksize=3)
            self.conv12 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3)

            self.dconv3 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2)
            self.conv13 = L.ConvolutionND(ndim=3, in_channels=32 + 64, out_channels=32, ksize=3)
            self.conv14 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3)
            self.conv15 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=label, ksize=1)

            self.bnc0 = L.BatchNormalization(16)
            self.bnc1 = L.BatchNormalization(32)
            self.bnc2 = L.BatchNormalization(32)
            self.bnc3 = L.BatchNormalization(64)
            self.bnc4 = L.BatchNormalization(64)
            self.bnc5 = L.BatchNormalization(128)
            self.bnc6 = L.BatchNormalization(128)
            self.bnc7 = L.BatchNormalization(256)
            # bnc8 = L.BatchNormalization(512)

            # bnd9 = L.BatchNormalization(512)
            self.bnd8 = L.BatchNormalization(128)
            self.bnd7 = L.BatchNormalization(128)
            # bnd6 = L.BatchNormalization(256)
            self.bnd5 = L.BatchNormalization(64)
            self.bnd4 = L.BatchNormalization(64)
            # bnd3 = L.BatchNormalization(128)
            self.bnd2 = L.BatchNormalization(32)
            self.bnd1 = L.BatchNormalization(32)
            self.train = True
Example 22
    def __init__(self, vocab_size, n_labels, word_vocab_size, n_pos, out_size,
                 dropout, args):
        self.args = args
        sent_len = 4096  #args.maxlen
        out_channels = int(sent_len * 2)
        super().__init__(
            embed_tri=L.EmbedID(vocab_size[0], out_size, ignore_label=-1),
            embed_four=L.EmbedID(vocab_size[1], out_size, ignore_label=-1),
            embed_word=L.EmbedID(word_vocab_size, out_size, ignore_label=-1),
            embed_pos=L.EmbedID(n_pos, out_size, ignore_label=-1),

            # Block 1
            bn1=L.BatchNormalization(out_size),
            conv1=L.ConvolutionND(ndim=1,
                                  in_channels=out_size,
                                  out_channels=out_size,
                                  ksize=3,
                                  stride=2,
                                  cover_all=True),
            bn2=L.BatchNormalization(out_size),
            conv2=L.ConvolutionND(ndim=1,
                                  in_channels=out_size,
                                  out_channels=out_size,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),

            # Block 2
            bn3=L.BatchNormalization(out_size * 2),
            conv3=L.ConvolutionND(ndim=1,
                                  in_channels=out_size * 2,
                                  out_channels=out_size * 2,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),
            bn4=L.BatchNormalization(out_size * 2),
            conv4=L.ConvolutionND(ndim=1,
                                  in_channels=out_size * 2,
                                  out_channels=out_size * 2,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),

            # Block 3
            bn5=L.BatchNormalization(out_size * 4),
            conv5=L.ConvolutionND(ndim=1,
                                  in_channels=out_size * 4,
                                  out_channels=out_size * 4,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),
            bn6=L.BatchNormalization(out_size * 4),
            conv6=L.ConvolutionND(ndim=1,
                                  in_channels=out_size * 4,
                                  out_channels=out_size * 4,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),

            # Block 4
            bn7=L.BatchNormalization(out_size * 8),
            conv7=L.ConvolutionND(ndim=1,
                                  in_channels=out_size * 8,
                                  out_channels=out_size * 8,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),
            bn8=L.BatchNormalization(out_size * 8),
            conv8=L.ConvolutionND(ndim=1,
                                  in_channels=out_size * 8,
                                  out_channels=out_size * 8,
                                  ksize=2,
                                  stride=2,
                                  cover_all=True),

            # Block 1
            bn1_b=L.BatchNormalization(out_size),
            conv1_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size,
                                    out_channels=out_size,
                                    ksize=3,
                                    stride=2,
                                    cover_all=True),
            bn2_b=L.BatchNormalization(out_size),
            conv2_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size,
                                    out_channels=out_size,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 2
            bn3_b=L.BatchNormalization(out_size * 2),
            conv3_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 2,
                                    out_channels=out_size * 2,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn4_b=L.BatchNormalization(out_size * 2),
            conv4_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 2,
                                    out_channels=out_size * 2,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 3
            bn5_b=L.BatchNormalization(out_size * 4),
            conv5_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 4,
                                    out_channels=out_size * 4,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn6_b=L.BatchNormalization(out_size * 4),
            conv6_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 4,
                                    out_channels=out_size * 4,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 4
            bn7_b=L.BatchNormalization(out_size * 8),
            conv7_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 8,
                                    out_channels=out_size * 8,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn8_b=L.BatchNormalization(out_size * 8),
            conv8_b=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 8,
                                    out_channels=out_size * 8,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 1
            bn1_w=L.BatchNormalization(out_size),
            conv1_w=L.ConvolutionND(ndim=1,
                                    in_channels=out_size,
                                    out_channels=out_size,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn2_w=L.BatchNormalization(out_size),
            conv2_w=L.ConvolutionND(ndim=1,
                                    in_channels=out_size,
                                    out_channels=out_size,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 2
            bn3_w=L.BatchNormalization(out_size * 2),
            conv3_w=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 2,
                                    out_channels=out_size * 2,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn4_w=L.BatchNormalization(out_size * 2),
            conv4_w=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 2,
                                    out_channels=out_size * 2,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 1
            bn1_p=L.BatchNormalization(out_size),
            conv1_p=L.ConvolutionND(ndim=1,
                                    in_channels=out_size,
                                    out_channels=out_size,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn2_p=L.BatchNormalization(out_size),
            conv2_p=L.ConvolutionND(ndim=1,
                                    in_channels=out_size,
                                    out_channels=out_size,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),

            # Block 2
            bn3_p=L.BatchNormalization(out_size * 2),
            conv3_p=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 2,
                                    out_channels=out_size * 2,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            bn4_p=L.BatchNormalization(out_size * 2),
            conv4_p=L.ConvolutionND(ndim=1,
                                    in_channels=out_size * 2,
                                    out_channels=out_size * 2,
                                    ksize=2,
                                    stride=2,
                                    cover_all=True),
            # Fully connected
            fcb1=L.Linear(None, 1024),
            fcb2=L.Linear(1024, 128),
            #fc3 = L.Linear(None, 2048),
            #fctri  = L.Linear(None, 2048),
            fcfour=L.Linear(None, 2048),
            fcword=L.Linear(None, 1024),
            #fcpos = L.Linear(None, 1024),
            #fc4 = L.Linear(None, 1024),
            fc5=L.Linear(None, 1024),
            fc6=L.Linear(None, 256),
            fc7=L.Linear(None, n_labels),
        )
        self.dropout = dropout
        self.train = True
        self.first = True
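
Every conv*_b/conv*_w/conv*_p link above uses ksize=2, stride=2, cover_all=True, so each block halves the temporal length of its input (rounding up). A quick standalone check of that behaviour, not part of the original snippet:

import numpy as np
import chainer.links as L

# hypothetical shape check: a ksize=2, stride=2, cover_all=True 1-D convolution
# maps a length-n sequence to ceil(n / 2) outputs
conv = L.ConvolutionND(ndim=1, in_channels=4, out_channels=4,
                       ksize=2, stride=2, cover_all=True)
x = np.zeros((1, 4, 9), dtype=np.float32)  # (batch, channels, length)
print(conv(x).shape)                       # -> (1, 4, 5), i.e. ceil(9 / 2)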
Ejemplo n.º 23
0
    def __init__(self, label):
        super(UNet3D, self).__init__()
        with self.init_scope():
            # encoder path
            self.conv1 = L.ConvolutionND(ndim=3,
                                         in_channels=1,
                                         out_channels=8,
                                         ksize=3,
                                         pad=1)
            self.bnc0 = L.BatchNormalization(8)
            self.conv2 = L.ConvolutionND(ndim=3,
                                         in_channels=8,
                                         out_channels=16,
                                         ksize=3,
                                         pad=1)
            self.bnc1 = L.BatchNormalization(16)

            self.conv3 = L.ConvolutionND(ndim=3,
                                         in_channels=16,
                                         out_channels=16,
                                         ksize=3,
                                         pad=1)
            self.bnc2 = L.BatchNormalization(16)
            self.conv4 = L.ConvolutionND(ndim=3,
                                         in_channels=16,
                                         out_channels=32,
                                         ksize=3,
                                         pad=1)
            self.bnc3 = L.BatchNormalization(32)

            self.conv5 = L.ConvolutionND(ndim=3,
                                         in_channels=32,
                                         out_channels=32,
                                         ksize=3,
                                         pad=1)
            self.bnc4 = L.BatchNormalization(32)
            self.conv6 = L.ConvolutionND(ndim=3,
                                         in_channels=32,
                                         out_channels=64,
                                         ksize=3,
                                         pad=1)
            self.bnc5 = L.BatchNormalization(64)

            # decoder path
            self.dconv1 = L.DeconvolutionND(ndim=3,
                                            in_channels=64,
                                            out_channels=64,
                                            ksize=2,
                                            stride=2)
            self.conv7 = L.ConvolutionND(ndim=3,
                                         in_channels=32 + 64,
                                         out_channels=32,
                                         ksize=3,
                                         pad=1)
            self.bnd4 = L.BatchNormalization(32)
            self.conv8 = L.ConvolutionND(ndim=3,
                                         in_channels=32,
                                         out_channels=32,
                                         ksize=3,
                                         pad=1)
            self.bnd3 = L.BatchNormalization(32)

            self.dconv2 = L.DeconvolutionND(ndim=3,
                                            in_channels=32,
                                            out_channels=32,
                                            ksize=2,
                                            stride=2)
            self.conv9 = L.ConvolutionND(ndim=3,
                                         in_channels=16 + 32,
                                         out_channels=16,
                                         ksize=3,
                                         pad=1)
            self.bnd2 = L.BatchNormalization(16)
            self.conv10 = L.ConvolutionND(ndim=3,
                                          in_channels=16,
                                          out_channels=16,
                                          ksize=3,
                                          pad=1)
            self.bnd1 = L.BatchNormalization(16)
            self.lcl = L.ConvolutionND(ndim=3,
                                       in_channels=16,
                                       out_channels=label,
                                       ksize=1,
                                       pad=0)
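
The snippet above only defines the links; its forward pass is not included. A minimal sketch of how such a 3-D U-Net is typically wired (conv-BN-ReLU pairs, 2x max pooling between encoder stages, deconvolution plus skip concatenation in the decoder) follows. This is an assumption, not the author's code, and it presumes the input spatial size is divisible by 4 and that F is chainer.functions:

    def __call__(self, x):
        # assumed forward pass; F refers to chainer.functions
        e0 = F.relu(self.bnc0(self.conv1(x)))
        e1 = F.relu(self.bnc1(self.conv2(e0)))
        e2 = F.relu(self.bnc2(self.conv3(F.max_pooling_nd(e1, 2, 2))))
        e3 = F.relu(self.bnc3(self.conv4(e2)))
        e4 = F.relu(self.bnc4(self.conv5(F.max_pooling_nd(e3, 2, 2))))
        e5 = F.relu(self.bnc5(self.conv6(e4)))
        d4 = F.relu(self.bnd4(self.conv7(F.concat([self.dconv1(e5), e3]))))
        d3 = F.relu(self.bnd3(self.conv8(d4)))
        d2 = F.relu(self.bnd2(self.conv9(F.concat([self.dconv2(d3), e1]))))
        d1 = F.relu(self.bnd1(self.conv10(d2)))
        return self.lcl(d1)  # per-voxel class scores, `label` channels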
Ejemplo n.º 24
0
    def __init__(self, in_channel, n_classes):
        self.in_channel = in_channel
        super(UNet3D, self).__init__(
            c0=L.ConvolutionND(3, self.in_channel, 32, 3, 1, 1, initial_bias=None),
            c1=L.ConvolutionND(3, 32, 64, 3, 1, 1, initial_bias=None),

            c2=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
            c3=L.ConvolutionND(3, 64, 128, 3, 1, 1, initial_bias=None),

            c4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
            c5=L.ConvolutionND(3, 128, 256, 3, 1, 1, initial_bias=None),

            c6=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
            c7=L.ConvolutionND(3, 256, 512, 3, 1, 1, initial_bias=None),

            dc9=L.DeconvolutionND(3, 512, 512, 2, 2, initial_bias=None),
            dc8=L.ConvolutionND(3, 256 + 512, 256, 3, 1, 1, initial_bias=None),
            dc7=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),

            dc6=L.DeconvolutionND(3, 256, 256, 2, 2, initial_bias=None),
            dc5=L.ConvolutionND(3, 128 + 256, 128, 3, 1, 1, initial_bias=None),
            dc4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),

            dc3=L.DeconvolutionND(3, 128, 128, 2, 2, initial_bias=None),
            dc2=L.ConvolutionND(3, 64 + 128, 64, 3, 1, 1, initial_bias=None),
            dc1=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),

            dc0=L.ConvolutionND(3, 64, n_classes, 1, 1, initial_bias=None),

        )
        self.train = True
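
For readers unfamiliar with the positional form used here: L.ConvolutionND(3, cin, cout, 3, 1, 1) means ndim=3, ksize=3, stride=1, pad=1, so every c*/dc* convolution preserves the spatial shape while the 2x2-stride deconvolutions double it. A standalone shape check (hypothetical, not part of the original):

import numpy as np
import chainer.links as L

# ConvolutionND(3, in, out, 3, 1, 1): 3-D conv, kernel 3, stride 1, pad 1
conv = L.ConvolutionND(3, 1, 32, 3, 1, 1, initial_bias=None)
x = np.zeros((1, 1, 16, 16, 16), dtype=np.float32)
print(conv(x).shape)  # -> (1, 32, 16, 16, 16): spatial shape is preserved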
Ejemplo n.º 25
0
    def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char,
                 dropout, subword):  # dropout ratio, zero indicates no dropout
        super(CNN1D, self).__init__()
        with self.init_scope():
            self.subword = subword
            # n_units_char = 15
            self.embed = L.EmbedID(
                len(vocab_ngram_tokens.lst_words) + 2,
                n_units_char,
                initialW=I.Uniform(1. / n_units_char)
            )  # ngram token embeddings; plus 2 for OOV and the end symbol.

            self.n_ngram = vocab_ngram_tokens.metadata[
                "max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1

            # n_filters = {i: min(200, i * 5) for i in range(1, 1 + 1)}
            # self.cnns = (L.Convolution2D(1, v, (k, n_units_char),) for k, v in n_filters.items())
            # self.out = L.Linear(sum([v for k, v in n_filters.items()]), n_units)
            if 'small' in self.subword:
                self.cnn1 = L.ConvolutionND(
                    1,
                    n_units_char,
                    50,
                    (1, ),
                )
                self.out = L.Linear(50, n_units)
            else:
                self.cnn1 = L.ConvolutionND(
                    1,
                    n_units_char,
                    50,
                    (1, ),
                )
                self.cnn2 = L.ConvolutionND(
                    1,
                    n_units_char,
                    100,
                    (2, ),
                )
                self.cnn3 = L.ConvolutionND(
                    1,
                    n_units_char,
                    150,
                    (3, ),
                )
                self.cnn4 = L.ConvolutionND(
                    1,
                    n_units_char,
                    200,
                    (4, ),
                )
                self.cnn5 = L.ConvolutionND(
                    1,
                    n_units_char,
                    200,
                    (5, ),
                )
                self.cnn6 = L.ConvolutionND(
                    1,
                    n_units_char,
                    200,
                    (6, ),
                )
                self.cnn7 = L.ConvolutionND(
                    1,
                    n_units_char,
                    200,
                    (7, ),
                )
                self.out = L.Linear(1100, n_units)

            self.dropout = dropout
            self.vocab = vocab
            self.vocab_ngram_tokens = vocab_ngram_tokens
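
The forward pass for this character-level CNN is not part of the snippet. A common pattern for this layout (embed the ngram tokens, move the embedding dimension onto the channel axis, convolve with each filter width, max-pool over time, project) would look roughly like the sketch below, shown for the 'small' branch only; it is an assumption, with F standing for chainer.functions:

    def __call__(self, ngram_ids):
        # assumed forward pass for the 'small' branch
        e = self.embed(ngram_ids)          # (batch, length, n_units_char)
        e = F.transpose(e, (0, 2, 1))      # (batch, n_units_char, length)
        h = F.relu(self.cnn1(e))           # (batch, 50, length)
        h = F.max(h, axis=2)               # max over time -> (batch, 50)
        h = F.dropout(h, ratio=self.dropout)
        return self.out(h)                 # (batch, n_units)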
Ejemplo n.º 26
0
    def __init__(self, dimz, gf_dim=512, lamda=0.1):
        self.dimz = dimz
        self.gf_dim = gf_dim
        self.lamda = lamda
        w = chainer.initializers.Normal(0.02)
        super(Generator, self).__init__(
            ### foreground image generator
            l_f0=L.Linear(dimz, 4 * 4 * gf_dim // 2 * 2, initialW=w),
            bn_f0=L.BatchNormalization(4 * 4 * gf_dim // 2 * 2),
            dc_f1=CBR3D(gf_dim // 2,
                        gf_dim // 4,
                        bn=True,
                        sample='up',
                        activation=F.relu),
            dc_f2=CBR3D(gf_dim // 4,
                        gf_dim // 8,
                        bn=True,
                        sample='up',
                        activation=F.relu),

            ### background image generator
            l_b0=L.Linear(self.dimz, 4 * 4 * gf_dim, initialW=w),
            bn_b0=L.BatchNormalization(4 * 4 * gf_dim),
            dc_b1=CBR(None,
                      gf_dim // 2,
                      bn=True,
                      sample='up',
                      activation=F.relu),
            dc_b2=CBR(None,
                      gf_dim // 4,
                      bn=True,
                      sample='up',
                      activation=F.relu),
            dc_b3=CBR(None,
                      gf_dim // 8,
                      bn=True,
                      sample='up',
                      activation=F.relu),
            dc_b4=L.Deconvolution2D(None, 3, 4, 2, 1, initialW=w),

            ### flow colorizer with U-Net
            c_m1=CBR3D(2,
                       gf_dim // 16,
                       bn=False,
                       sample='down',
                       activation=F.leaky_relu),
            c_m2=CBR3D(gf_dim // 16,
                       gf_dim // 8,
                       bn=True,
                       sample='down',
                       activation=F.leaky_relu),
            c_m3=CBR3D(gf_dim // 4,
                       gf_dim // 4,
                       bn=True,
                       sample='same',
                       activation=F.leaky_relu),
            c_m4=CBR3D(gf_dim // 4,
                       gf_dim // 2,
                       bn=True,
                       sample='down',
                       activation=F.leaky_relu),
            c_m5=CBR3D(gf_dim // 2,
                       gf_dim,
                       bn=True,
                       sample='down',
                       activation=F.leaky_relu),
            dc_m1=CBR3D(gf_dim,
                        gf_dim // 2,
                        bn=True,
                        sample='up',
                        activation=F.relu),
            dc_m2=CBR3D(gf_dim,
                        gf_dim // 4,
                        bn=True,
                        sample='up',
                        activation=F.relu),
            dc_m3=CBR3D(gf_dim // 2,
                        gf_dim // 8,
                        bn=True,
                        sample='up',
                        activation=F.relu),
            dc_mask=L.DeconvolutionND(3, gf_dim // 8, 1, 4, 2, 1, initialW=w),
            dc_m4=CBR3D(gf_dim // 16 * 3,
                        gf_dim // 16,
                        bn=True,
                        sample='up',
                        activation=F.relu),
            dc_m5=L.ConvolutionND(3, gf_dim // 16, 3, 3, 1, 1, initialW=w),
        )
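
The linear layer l_f0 emits 4 * 4 * gf_dim // 2 * 2 units (8192 for the default gf_dim=512), which the forward pass presumably reshapes into a small spatio-temporal feature map before the 3-D up-sampling blocks. The reshape is not in the snippet, so the fragment below is a hypothetical reconstruction of how the foreground stream might start, assuming each 'up' CBR3D block doubles every spatial dimension and F is chainer.functions:

    def _fore_stream_head(self, z):
        # hypothetical helper, not in the original code
        h = F.relu(self.bn_f0(self.l_f0(z)))
        h = F.reshape(h, (z.shape[0], self.gf_dim // 2, 2, 4, 4))
        h = self.dc_f1(h)     # assumed -> (batch, gf_dim // 4, 4, 8, 8)
        return self.dc_f2(h)  # assumed -> (batch, gf_dim // 8, 8, 16, 16)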
Ejemplo n.º 27
0
    def __init__(self,
                 in_channel,
                 out_channel,
                 n_dims=2,
                 kernel_size=3,
                 n_layers=5,
                 n_filters=64,
                 is_bayesian=False,
                 is_residual=False,
                 initialW=initializers.HeNormal(),
                 initial_bias=None,
                 batch_norm=False,
                 block_type='default',
                 **kwargs):

        self.n_dims = n_dims
        if self.n_dims != 2 and self.n_dims != 3:
            warnings.warn('unsupported number of input dimensions.')

        self.in_channel = in_channel
        self.out_channel = out_channel
        self.n_layers = n_layers
        self.n_filters = n_filters
        self.kernel_size = kernel_size
        self.is_bayesian = is_bayesian
        self.is_residual = is_residual

        self.initialW = initialW
        self.initial_bias = initial_bias
        self.block_type = block_type
        self.batch_norm = batch_norm

        chainer.Chain.__init__(self)

        with self.init_scope():
            # down convolution
            for i in range(1, self.n_layers + 1):
                if i == 1:
                    setattr(
                        self, 'down_unet_block_%d' % i,
                        UNetBlock(self.n_dims,
                                  self.in_channel,
                                  self.n_filters * (2**(i - 1)),
                                  self.n_filters * (2**(i - 1)),
                                  self.kernel_size,
                                  initialW=initialW,
                                  initial_bias=initial_bias,
                                  is_residual=self.is_residual,
                                  block_type=block_type,
                                  batch_norm=batch_norm))
                else:
                    setattr(
                        self, 'down_unet_block_%d' % i,
                        UNetBlock(self.n_dims,
                                  self.n_filters * (2**(i - 2)),
                                  self.n_filters * (2**(i - 1)),
                                  self.n_filters * (2**(i - 1)),
                                  self.kernel_size,
                                  initialW=initialW,
                                  initial_bias=initial_bias,
                                  is_residual=self.is_residual,
                                  block_type=block_type,
                                  batch_norm=batch_norm))

            # up convolution
            for i in range(1, self.n_layers):
                deconv_n_filters = self['down_unet_block_%d' %
                                        (i + 1)].out_channel
                setattr(
                    self, 'deconv_%d' % i,
                    L.DeconvolutionND(self.n_dims,
                                      deconv_n_filters,
                                      deconv_n_filters,
                                      self.kernel_size,
                                      stride=2,
                                      pad=0,
                                      initialW=initialW,
                                      initial_bias=initial_bias))

                if self.batch_norm:
                    setattr(self, 'bn_deconv_%d' % i,
                            L.BatchNormalization(deconv_n_filters))

                upconv_n_filters = self['down_unet_block_%d' %
                                        i].out_channel + self['deconv_%d' %
                                                              i].W.shape[1]
                setattr(
                    self, 'up_unet_block_%d' % i,
                    UNetBlock(self.n_dims,
                              upconv_n_filters,
                              self.n_filters * (2**(i - 1)),
                              self.n_filters * (2**(i - 1)),
                              self.kernel_size,
                              initialW=initialW,
                              initial_bias=initial_bias,
                              is_residual=self.is_residual,
                              block_type=block_type,
                              batch_norm=batch_norm))

                if i == 1:  # output layer
                    setattr(
                        self, 'up_conv%d_3' % i,
                        L.ConvolutionND(self.n_dims,
                                        self.n_filters * (2**(i - 1)),
                                        self.out_channel,
                                        ksize=self.kernel_size,
                                        stride=1,
                                        pad=1,
                                        initialW=initialW,
                                        initial_bias=initial_bias))

            # initialize weights for deconv layer
            for i in range(1, self.n_layers):
                deconv_k_size = self['deconv_%d' % i].W.shape[-1]
                deconv_n_filters = self['deconv_%d' % i].W.shape[1]

                self['deconv_%d' % i].W.data[...] = 0

                if self.n_dims == 2:
                    filt = get_upsampling_filter_2d(deconv_k_size)
                    self['deconv_%d' %
                         i].W.data[range(deconv_n_filters),
                                   range(deconv_n_filters), :, :] = filt
                elif self.n_dims == 3:
                    filt = get_upsampling_filter_3d(deconv_k_size)
                    self['deconv_%d' %
                         i].W.data[range(deconv_n_filters),
                                   range(deconv_n_filters), :, :, :] = filt
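
get_upsampling_filter_2d and get_upsampling_filter_3d are helpers defined outside this snippet; given how they are used (seeding the deconvolution weights), they are presumably the standard bilinear/trilinear FCN upsampling kernels. A sketch of the 2-D version under that assumption:

import numpy as np

def get_upsampling_filter_2d(size):
    # assumed implementation: bilinear kernel used to initialise FCN-style deconvolutions
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    return ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))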
Ejemplo n.º 28
0
    def __init__(self, comm, mask, feature_dim,
                 encoder_channels: Sequence[int],
                 encoder_layers: Sequence[int],
                 decoder_channels: Sequence[int],
                 decoder_layers: Sequence[int]):
        super().__init__()

        assert len(encoder_layers) == len(decoder_layers)
        assert len(encoder_channels) == len(encoder_layers)
        assert encoder_layers[0] == 1
        assert len(decoder_channels) == len(decoder_layers)

        self.comm = comm
        self.mask = mask
        self.feature_dim = feature_dim
        self.encoder_channels = encoder_channels
        self.encoder_layers = encoder_layers
        self.decoder_channels = decoder_channels
        self.decoder_layers = decoder_layers
        self.shapes = self.get_shapes(mask.shape, len(encoder_layers))
        self.loss_const = self.mask.size / self.mask.sum()

        with self.init_scope():
            # Encoding Block
            for layer_idx, num_layers in enumerate(encoder_layers):
                for rep_idx in range(num_layers):
                    in_channel = 2 if layer_idx == 0 and rep_idx == 0 else encoder_channels[
                        layer_idx]
                    if rep_idx == num_layers - 1 and layer_idx != len(
                            encoder_layers
                    ) - 1:  # output layer of each stage, excluding the layer that outputs the feature
                        out_channel, stride = encoder_channels[layer_idx +
                                                               1], 2
                    else:
                        out_channel, stride = encoder_channels[layer_idx], 1
                    """
                     def __init__(self, comm, in_channel, out_channel, stride=1):
                    """
                    if layer_idx != 0:
                        self.add_link(
                            "conv_{}_{}".format(layer_idx, rep_idx),
                            ResBlockMN(self.comm, in_channel, out_channel,
                                       stride))
                    else:
                        """
                        def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0, nobias=False, initialW=None, initial_bias=None, cover_all=False):
                        """
                        self.add_link(
                            "conv_{}_{}".format(layer_idx, rep_idx),
                            L.ConvolutionND(3, in_channel, out_channel, 5,
                                            stride, 2))

            # Extract Block
            """
            def __init__(self, in_size, out_size=None, nobias=False, initialW=None, initial_bias=None):
            """
            self.linear_extract = L.Linear(
                np.prod(
                    self.chain((self.encoder_channels[-1], ),
                               self.shapes[-1])), self.feature_dim)
            self.linear_reconstruct = L.Linear(
                self.feature_dim,
                np.prod(
                    self.chain((self.decoder_channels[-1], ),
                               self.shapes[-1])))

            # Decoding Block
            for layer_idx, num_layers in reversed(
                    tuple(enumerate(decoder_layers))):
                for rep_idx in range(num_layers):
                    if rep_idx == 0 and layer_idx != len(
                            decoder_layers
                    ) - 1:  # input layer of each stage, excluding the layer that receives the feature
                        in_channel = decoder_channels[layer_idx + 1]
                    else:
                        in_channel = decoder_channels[layer_idx]

                    out_channel = 1 if layer_idx == 0 and rep_idx == num_layers - 1 else decoder_channels[
                        layer_idx]

                    stride = 1
                    self.add_link(
                        "dcnv_{}_{}".format(layer_idx, rep_idx),
                        ResBlockMN(self.comm, in_channel, out_channel, stride))