def __init__(self, num_of_label):
    """Build a 3D U-Net: five encoder convolutions and a decoder with two
    deconvolution upsampling stages, ending in a 1x1x1 classification layer."""
    he_init = chainer.initializers.HeNormal()
    super(UNet3D, self).__init__()
    with self.init_scope():
        # ----- encoder path -----
        self.ce0 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3, pad=1, initialW=he_init)
        self.bne0 = L.BatchNormalization(16)
        self.ce1 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=1, initialW=he_init)
        self.bne1 = L.BatchNormalization(32)
        self.ce2 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=he_init)
        self.bne2 = L.BatchNormalization(32)
        self.ce3 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3, pad=1, initialW=he_init)
        self.bne3 = L.BatchNormalization(64)
        self.ce4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=he_init)
        self.bne4 = L.BatchNormalization(64)
        # ----- decoder path -----
        self.cd4 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=128, ksize=3, pad=1, initialW=he_init)
        self.bnd4 = L.BatchNormalization(128)
        self.deconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2, initialW=he_init, nobias=True)
        # 64+128 input channels: skip connection concatenated with upsampled features.
        self.cd3 = L.ConvolutionND(ndim=3, in_channels=64 + 128, out_channels=64, ksize=3, pad=1, initialW=he_init)
        self.bnd3 = L.BatchNormalization(64)
        self.cd2 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3, pad=1, initialW=he_init)
        self.bnd2 = L.BatchNormalization(64)
        self.deconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2, initialW=he_init, nobias=True)
        self.cd1 = L.ConvolutionND(ndim=3, in_channels=32 + 64, out_channels=32, ksize=3, pad=1, initialW=he_init)
        self.bnd1 = L.BatchNormalization(32)
        self.cd0 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1, initialW=he_init)
        self.bnd0 = L.BatchNormalization(32)
        # Final per-voxel classification layer.
        self.lcl = L.ConvolutionND(ndim=3, in_channels=32, out_channels=num_of_label, ksize=1, pad=0, initialW=he_init)
def __init__(self):
    """Convolutional auto-encoder: 2-D conv encoder, fully-connected bottleneck,
    and a deconvolution decoder."""
    super(CAE, self).__init__()
    with self.init_scope():
        # Encoder convolutions.
        self.encoder1 = L.ConvolutionND(ndim=2, in_channels=1, out_channels=18, ksize=7, stride=1)
        self.encoder2 = L.ConvolutionND(ndim=2, in_channels=18, out_channels=45, ksize=5, stride=1, pad=2)
        # Decoder deconvolutions (commented-out outsize values kept from earlier experiments).
        self.decoder1 = L.DeconvolutionND(ndim=2, in_channels=45, out_channels=64, ksize=1, stride=1)  # , outsize=(61, 41))
        self.decoder2 = L.DeconvolutionND(ndim=2, in_channels=64, out_channels=1, ksize=7, stride=1)  # , outsize=(128, 88))
        # Fully-connected bottleneck; Linear1 infers its input size lazily.
        self.Linear1 = L.Linear(None, out_size=2048)
        self.Linear2 = L.Linear(in_size=2048, out_size=1024)
        self.Linear3 = L.Linear(in_size=1024, out_size=2048)
        # NOTE(review): in_channels=16 assumes the 2048-dim bottleneck output is
        # reshaped to 16 channels before this layer — confirm against the forward pass.
        self.decoder3 = L.DeconvolutionND(ndim=2, in_channels=16, out_channels=45, ksize=(15, 13), stride=1, outsize=(30, 20))  # , outsize=(128, 88))
def __init__(self, in_channels=1, n_classes=4):
    """VoxResNet for volumetric segmentation: a stem, three strided stages of
    VoxRes modules, and four auxiliary deconvolution classifier branches (c1-c4)."""
    he = chainer.initializers.HeNormal(scale=0.01)
    super(VoxResNet, self).__init__(
        # Stem.
        conv1a=L.ConvolutionND(3, in_channels, 32, 3, pad=1, initialW=he),
        bnorm1a=L.BatchNormalization(32),
        conv1b=L.ConvolutionND(3, 32, 32, 3, pad=1, initialW=he),
        bnorm1b=L.BatchNormalization(32),
        # Stage 1 (stride-2 downsample + residual modules).
        conv1c=L.ConvolutionND(3, 32, 64, 3, stride=2, pad=1, initialW=he),
        voxres2=VoxResModule(64),
        voxres3=VoxResModule(64),
        bnorm3=L.BatchNormalization(64),
        # Stage 2.
        conv4=L.ConvolutionND(3, 64, 64, 3, stride=2, pad=1, initialW=he),
        voxres5=VoxResModule(64),
        voxres6=VoxResModule(64),
        bnorm6=L.BatchNormalization(64),
        # Stage 3.
        conv7=L.ConvolutionND(3, 64, 64, 3, stride=2, pad=1, initialW=he),
        voxres8=VoxResModule(64),
        voxres9=VoxResModule(64),
        # Auxiliary classifiers at 1x / 2x / 4x / 8x upsampling.
        c1deconv=L.DeconvolutionND(3, 32, 32, 3, pad=1, initialW=he),
        c1conv=L.ConvolutionND(3, 32, n_classes, 3, pad=1, initialW=he),
        c2deconv=L.DeconvolutionND(3, 64, 64, 4, stride=2, pad=1, initialW=he),
        c2conv=L.ConvolutionND(3, 64, n_classes, 3, pad=1, initialW=he),
        c3deconv=L.DeconvolutionND(3, 64, 64, 6, stride=4, pad=1, initialW=he),
        c3conv=L.ConvolutionND(3, 64, n_classes, 3, pad=1, initialW=he),
        c4deconv=L.DeconvolutionND(3, 64, 64, 10, stride=8, pad=1, initialW=he),
        c4conv=L.ConvolutionND(3, 64, n_classes, 3, pad=1, initialW=he),
    )
def __init__(self, in_channel, n_classes):
    """3D U-Net with an 8-convolution encoder (c0-c7) and a decoder (dc9-dc0)
    using three 2x deconvolution upsampling stages and skip concatenation."""
    self.in_channel = in_channel
    super(UNet3D, self).__init__(
        # Encoder: positional args are (ndim, in_ch, out_ch, ksize, stride, pad).
        c0=L.ConvolutionND(3, self.in_channel, 32, 3, 1, 1, initial_bias=None),
        c1=L.ConvolutionND(3, 32, 64, 3, 1, 1, initial_bias=None),
        c2=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
        c3=L.ConvolutionND(3, 64, 128, 3, 1, 1, initial_bias=None),
        c4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
        c5=L.ConvolutionND(3, 128, 256, 3, 1, 1, initial_bias=None),
        c6=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
        c7=L.ConvolutionND(3, 256, 512, 3, 1, 1, initial_bias=None),
        # Decoder: input channel sums (e.g. 256 + 512) take the skip concat.
        dc9=L.DeconvolutionND(3, 512, 512, 2, 2, initial_bias=None),
        dc8=L.ConvolutionND(3, 256 + 512, 256, 3, 1, 1, initial_bias=None),
        dc7=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
        dc6=L.DeconvolutionND(3, 256, 256, 2, 2, initial_bias=None),
        dc5=L.ConvolutionND(3, 128 + 256, 128, 3, 1, 1, initial_bias=None),
        dc4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
        dc3=L.DeconvolutionND(3, 128, 128, 2, 2, initial_bias=None),
        dc2=L.ConvolutionND(3, 64 + 128, 64, 3, 1, 1, initial_bias=None),
        dc1=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
        dc0=L.ConvolutionND(3, 64, n_classes, 1, 1, initial_bias=None),
    )
    self.train = True
def __init__(self, inplanes, gpu):
    """Stacked-hourglass 3D cost-volume module: two stride-2 downsampling
    stages (conv1-conv4) and two deconvolution upsampling stages (conv5, conv6)
    whose outputs are added to the intermediate features (see trailing comments)."""
    super(hourglass, self).__init__()
    self.gpu = gpu
    # Downsampling half.
    self.conv1 = Sequential(
        convbn_3d(inplanes, inplanes * 2, kernel_size=3, stride=2, pad=1),
        F.relu).to_gpu(self.gpu)
    self.conv2 = convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1).to_gpu(self.gpu)
    self.conv3 = Sequential(
        convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=2, pad=1),
        F.relu).to_gpu(self.gpu)
    self.conv4 = Sequential(
        convbn_3d(inplanes * 2, inplanes * 2, kernel_size=3, stride=1, pad=1),
        F.relu).to_gpu(self.gpu)
    # Upsampling half (deconv + BN, no activation here).
    self.conv5 = Sequential(
        L.DeconvolutionND(3, inplanes * 2, inplanes * 2, ksize=4, stride=2, pad=1,
                          nobias=True, initialW=ini.Normal(math.sqrt(2. / 32))),
        L.BatchNormalization(inplanes * 2, eps=1e-5, decay=0.95,
                             initial_gamma=ini.One(), initial_beta=ini.Zero())).to_gpu(self.gpu)  # +conv2
    self.conv6 = Sequential(
        L.DeconvolutionND(3, inplanes * 2, inplanes, ksize=4, stride=2, pad=1, nobias=True),
        L.BatchNormalization(inplanes, eps=1e-5, decay=0.95,
                             initial_gamma=ini.One(), initial_beta=ini.Zero())).to_gpu(self.gpu)  # +x
def __init__(self, out_ch=1):
    """3D decoder: six CBR blocks followed by two deconvolutions producing
    ``out_ch`` output channels."""
    layers = {}
    # Normal(0.02) init is used only by the two raw deconvolution layers;
    # the CBR blocks handle their own initialization.
    w = chainer.initializers.Normal(0.02)
    layers['c0'] = CBR(512, 512, 3, 1, 1, bn=True, activation=F.relu, dropout=True)
    layers['c1'] = CBR(512, 256, 3, 1, 1, bn=True, activation=F.relu, dropout=True)
    layers['c2'] = CBR(256, 256, 3, 1, 1, bn=True, activation=F.relu, dropout=True)
    # Stride-2 block doubles spatial resolution handling; no dropout from here on.
    layers['c3'] = CBR(256, 256, 4, 2, 1, bn=True, activation=F.relu, dropout=False)
    layers['c4'] = CBR(256, 128, 3, 1, 1, bn=True, activation=F.relu, dropout=False)
    layers['c5'] = CBR(128, 128, 3, 1, 1, bn=True, activation=F.relu, dropout=False)
    layers['c6'] = L.DeconvolutionND(3, 128, 64, 4, 2, 1, initialW=w)
    layers['c7'] = L.DeconvolutionND(3, 64, out_ch, 3, 1, 1, initialW=w)
    super(Decoder, self).__init__(**layers)
def __init__(self, in_channel, n_classes):
    """Batch-normalized 3D U-Net: same topology as UNet3D plus one
    BatchNormalization link per (un-commented) convolution layer."""
    self.in_channel = in_channel
    super(UNet3DBN, self).__init__(
        # Encoder: positional args are (ndim, in_ch, out_ch, ksize, stride, pad).
        c0=L.ConvolutionND(3, self.in_channel, 32, 3, 1, 1, initial_bias=None),
        c1=L.ConvolutionND(3, 32, 64, 3, 1, 1, initial_bias=None),
        c2=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
        c3=L.ConvolutionND(3, 64, 128, 3, 1, 1, initial_bias=None),
        c4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
        c5=L.ConvolutionND(3, 128, 256, 3, 1, 1, initial_bias=None),
        c6=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
        c7=L.ConvolutionND(3, 256, 512, 3, 1, 1, initial_bias=None),
        # c8=L.ConvolutionND(3, 512, 512, 3, 1, 1, initial_bias=None),
        # Decoder; channel sums (e.g. 256 + 512) take the skip concatenation.
        dc9=L.DeconvolutionND(3, 512, 512, 2, 2, initial_bias=None),
        dc8=L.ConvolutionND(3, 256 + 512, 256, 3, 1, 1, initial_bias=None),
        dc7=L.ConvolutionND(3, 256, 256, 3, 1, 1, initial_bias=None),
        dc6=L.DeconvolutionND(3, 256, 256, 2, 2, initial_bias=None),
        dc5=L.ConvolutionND(3, 128 + 256, 128, 3, 1, 1, initial_bias=None),
        dc4=L.ConvolutionND(3, 128, 128, 3, 1, 1, initial_bias=None),
        dc3=L.DeconvolutionND(3, 128, 128, 2, 2, initial_bias=None),
        dc2=L.ConvolutionND(3, 64 + 128, 64, 3, 1, 1, initial_bias=None),
        dc1=L.ConvolutionND(3, 64, 64, 3, 1, 1, initial_bias=None),
        dc0=L.ConvolutionND(3, 64, n_classes, 1, 1, initial_bias=None),
        # Batch normalization after each active conv layer.
        bnc0=L.BatchNormalization(32),
        bnc1=L.BatchNormalization(64),
        bnc2=L.BatchNormalization(64),
        bnc3=L.BatchNormalization(128),
        bnc4=L.BatchNormalization(128),
        bnc5=L.BatchNormalization(256),
        bnc6=L.BatchNormalization(256),
        bnc7=L.BatchNormalization(512),
        # bnc8=L.BatchNormalization(512),
        # bnd9=L.BatchNormalization(512),
        bnd8=L.BatchNormalization(256),
        bnd7=L.BatchNormalization(256),
        # bnd6=L.BatchNormalization(256),
        bnd5=L.BatchNormalization(128),
        bnd4=L.BatchNormalization(128),
        # bnd3=L.BatchNormalization(128),
        bnd2=L.BatchNormalization(64),
        bnd1=L.BatchNormalization(64))
    self.train = True
def __init__(self, n_frames=16, z_slow_dim=256, z_fast_dim=256):
    """1-D deconvolution stack mapping a slow latent vector to per-frame fast
    latents; BatchNormalization layers are built without beta (use_beta=False)
    and weights use the layer defaults (initialW=None)."""
    super(FrameSeedGeneratorNoBetaInitDefault, self).__init__()
    w = None  # keep Chainer's default weight initializer
    with self.init_scope():
        # 1x deconv expands the latent, then four stride-2 stages grow the
        # temporal axis (1 -> 16 frames for the defaults).
        self.dc0 = L.DeconvolutionND(1, z_slow_dim, 512, 1, 1, 0, initialW=w)
        self.dc1 = L.DeconvolutionND(1, 512, 256, 4, 2, 1, initialW=w)
        self.dc2 = L.DeconvolutionND(1, 256, 128, 4, 2, 1, initialW=w)
        self.dc3 = L.DeconvolutionND(1, 128, 128, 4, 2, 1, initialW=w)
        self.dc4 = L.DeconvolutionND(1, 128, z_fast_dim, 4, 2, 1, initialW=w)
        self.bn0 = L.BatchNormalization(512, use_beta=False)
        self.bn1 = L.BatchNormalization(256, use_beta=False)
        self.bn2 = L.BatchNormalization(128, use_beta=False)
        self.bn3 = L.BatchNormalization(128, use_beta=False)
    self.z_slow_dim = z_slow_dim
    self.z_fast_dim = z_fast_dim
def __init__(self, n_frames=16, z_slow_dim=256, z_fast_dim=256, wscale=0.01):
    """1-D deconvolution stack mapping a slow latent vector to per-frame fast
    latents, with uniformly initialized weights (scale ``wscale``)."""
    super(FrameSeedGeneratorInitUniform, self).__init__()
    uniform = chainer.initializers.Uniform(wscale)
    with self.init_scope():
        # 1x deconv expands the latent, then four stride-2 stages grow the
        # temporal axis (1 -> 16 frames for the defaults).
        self.dc0 = L.DeconvolutionND(1, z_slow_dim, 512, 1, 1, 0, initialW=uniform)
        self.dc1 = L.DeconvolutionND(1, 512, 256, 4, 2, 1, initialW=uniform)
        self.dc2 = L.DeconvolutionND(1, 256, 128, 4, 2, 1, initialW=uniform)
        self.dc3 = L.DeconvolutionND(1, 128, 128, 4, 2, 1, initialW=uniform)
        self.dc4 = L.DeconvolutionND(1, 128, z_fast_dim, 4, 2, 1, initialW=uniform)
        self.bn0 = L.BatchNormalization(512)
        self.bn1 = L.BatchNormalization(256)
        self.bn2 = L.BatchNormalization(128)
        self.bn3 = L.BatchNormalization(128)
    self.z_slow_dim = z_slow_dim
    self.z_fast_dim = z_fast_dim
def __init__(self, video_len=32):
    """Flow generator: a linear projection of a 100-dim latent followed by 3D
    deconvolutions producing foreground flow (2 ch) and a mask (1 ch).

    Args:
        video_len: number of frames; the latent projection allocates
            ``video_len // 16`` temporal units at the coarsest resolution.
    """
    w = chainer.initializers.Normal(0.02)
    self.video_len = video_len
    super(FlowGenerator, self).__init__(
        l0=L.Linear(100, 4 * 4 * 512 * (self.video_len // 16), initialW=w),
        dc1=L.DeconvolutionND(3, 512, 256, 4, 2, 1, initialW=w),
        dc2=L.DeconvolutionND(3, 256, 128, 4, 2, 1, initialW=w),
        dc3=L.DeconvolutionND(3, 128, 64, 4, 2, 1, initialW=w),
        dc_fore=L.DeconvolutionND(3, 64, 2, 4, 2, 1, initialW=w),
        dc_mask=L.DeconvolutionND(3, 64, 1, 4, 2, 1, initialW=w),
        # BUG FIX: bn0's size used float division (video_len / 16) while l0
        # used integer division; under Python 3 that passes a float where
        # BatchNormalization expects an int size. Use // to match l0.
        bn0=L.BatchNormalization(4 * 4 * 512 * (self.video_len // 16)),
        bn1=L.BatchNormalization(256),
        bn2=L.BatchNormalization(128),
        bn3=L.BatchNormalization(64),
    )
def __init__(self, label):
    """Unpadded 3D U-Net: convolutions use ksize=3 with no padding, three
    deconvolution upsampling stages, and per-layer batch normalization."""
    super(UNet3D, self).__init__()
    with self.init_scope():
        # ----- encoder path -----
        self.conv1 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=16, ksize=3)
        self.conv2 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3)
        self.conv3 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3)
        self.conv4 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3)
        self.conv5 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3)
        self.conv6 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=128, ksize=3)
        self.conv7 = L.ConvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=3)
        self.conv8 = L.ConvolutionND(ndim=3, in_channels=128, out_channels=256, ksize=3)
        # ----- decoder path (channel sums take the skip concatenation) -----
        self.dconv1 = L.DeconvolutionND(ndim=3, in_channels=256, out_channels=256, ksize=2, stride=2)
        self.conv9 = L.ConvolutionND(ndim=3, in_channels=128 + 256, out_channels=128, ksize=3)
        self.conv10 = L.ConvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=3)
        self.dconv2 = L.DeconvolutionND(ndim=3, in_channels=128, out_channels=128, ksize=2, stride=2)
        self.conv11 = L.ConvolutionND(ndim=3, in_channels=64 + 128, out_channels=64, ksize=3)
        self.conv12 = L.ConvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=3)
        self.dconv3 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2)
        self.conv13 = L.ConvolutionND(ndim=3, in_channels=32 + 64, out_channels=32, ksize=3)
        self.conv14 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3)
        # Final per-voxel classification layer.
        self.conv15 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=label, ksize=1)
        # ----- batch normalization layers -----
        self.bnc0 = L.BatchNormalization(16)
        self.bnc1 = L.BatchNormalization(32)
        self.bnc2 = L.BatchNormalization(32)
        self.bnc3 = L.BatchNormalization(64)
        self.bnc4 = L.BatchNormalization(64)
        self.bnc5 = L.BatchNormalization(128)
        self.bnc6 = L.BatchNormalization(128)
        self.bnc7 = L.BatchNormalization(256)
        # bnc8=L.BatchNormalization(512)
        # bnd9=L.BatchNormalization(512)
        self.bnd8 = L.BatchNormalization(128)
        self.bnd7 = L.BatchNormalization(128)
        # bnd6=L.BatchNormalization(256)
        self.bnd5 = L.BatchNormalization(64)
        self.bnd4 = L.BatchNormalization(64)
        # bnd3=L.BatchNormalization(128)
        self.bnd2 = L.BatchNormalization(32)
        self.bnd1 = L.BatchNormalization(32)
    self.train = True
def __init__(self, num_in_elements, num_in_channel=1, out_channel=16, filter_size=5, slide_size=1, pooling_size=2, name=None, dropout_rate=0.0):
    """1-D convolution layer paired with its mirror deconvolution.

    Args:
        num_in_elements: number of input elements per input channel.
        num_in_channel: number of input channels.
        out_channel: number of output channels.
        filter_size: convolution filter size.
        slide_size: filter stride.
        pooling_size: pooling size applied to the output.
        name: optional layer name (used only for the summary print).
        dropout_rate: dropout ratio of the output layer.
    """
    self.num_in_elements = num_in_elements
    self.in_channel = num_in_channel
    self.out_channel = out_channel
    self.filter_size = filter_size
    self.slide_size = slide_size
    self.pooling_size = pooling_size
    self.dropout_rate = dropout_rate
    # Pre-compute and report the output sizes of the conv and pooling stages.
    self.num_of_conv_out_elements, self.num_of_pooling_out_elements = self.calc_num_out_elements1D(
        self.num_in_elements, self.in_channel, self.out_channel,
        self.filter_size, self.slide_size, self.pooling_size)
    print('Layer {}: in: {} out: conv out {} pooling out {}'.format(
        name, self.num_in_elements,
        self.num_of_conv_out_elements, self.num_of_pooling_out_elements))
    super().__init__()
    with self.init_scope():
        self.conv = L.ConvolutionND(1, self.in_channel, self.out_channel,
                                    ksize=self.filter_size, stride=self.slide_size)
        # Mirror deconvolution maps the conv output back to the input channels.
        self.dcnv = L.DeconvolutionND(1, self.out_channel, self.in_channel,
                                      ksize=self.filter_size, stride=self.slide_size)
def __init__(self, dimz, gf_dim=512, lamda=0.1):
    """Video generator with three branches: a 3D foreground generator, a 2D
    background generator, and a U-Net-style flow colorizer with mask output."""
    self.dimz = dimz
    self.gf_dim = gf_dim
    self.lamda = lamda
    w = chainer.initializers.Normal(0.02)
    super(Generator, self).__init__(
        # --- foreground image generator ---
        l_f0=L.Linear(dimz, 4 * 4 * gf_dim // 2 * 2, initialW=w),
        bn_f0=L.BatchNormalization(4 * 4 * gf_dim // 2 * 2),
        dc_f1=CBR3D(gf_dim // 2, gf_dim // 4, bn=True, sample='up', activation=F.relu),
        dc_f2=CBR3D(gf_dim // 4, gf_dim // 8, bn=True, sample='up', activation=F.relu),
        # --- background image generator ---
        l_b0=L.Linear(self.dimz, 4 * 4 * gf_dim, initialW=w),
        bn_b0=L.BatchNormalization(4 * 4 * gf_dim),
        dc_b1=CBR(None, gf_dim // 2, bn=True, sample='up', activation=F.relu),
        dc_b2=CBR(None, gf_dim // 4, bn=True, sample='up', activation=F.relu),
        dc_b3=CBR(None, gf_dim // 8, bn=True, sample='up', activation=F.relu),
        dc_b4=L.Deconvolution2D(None, 3, 4, 2, 1, initialW=w),
        # --- flow colorizer (U-Net): contracting path ---
        c_m1=CBR3D(2, gf_dim // 16, bn=False, sample='down', activation=F.leaky_relu),
        c_m2=CBR3D(gf_dim // 16, gf_dim // 8, bn=True, sample='down', activation=F.leaky_relu),
        c_m3=CBR3D(gf_dim // 4, gf_dim // 4, bn=True, sample='same', activation=F.leaky_relu),
        c_m4=CBR3D(gf_dim // 4, gf_dim // 2, bn=True, sample='down', activation=F.leaky_relu),
        c_m5=CBR3D(gf_dim // 2, gf_dim, bn=True, sample='down', activation=F.leaky_relu),
        # --- flow colorizer: expanding path with skip concatenation ---
        dc_m1=CBR3D(gf_dim, gf_dim // 2, bn=True, sample='up', activation=F.relu),
        dc_m2=CBR3D(gf_dim, gf_dim // 4, bn=True, sample='up', activation=F.relu),
        dc_m3=CBR3D(gf_dim // 2, gf_dim // 8, bn=True, sample='up', activation=F.relu),
        dc_mask=L.DeconvolutionND(3, gf_dim // 8, 1, 4, 2, 1, initialW=w),
        dc_m4=CBR3D(gf_dim // 16 * 3, gf_dim // 16, bn=True, sample='up', activation=F.relu),
        dc_m5=L.ConvolutionND(3, gf_dim // 16, 3, 3, 1, 1, initialW=w),
    )
def dcnv(in_size: int, kernel: int, stride: int, padding: int):
    """Return the output shape of a 3-D DeconvolutionND applied to a fixed
    (2, 5, D, D, D) input of the given spatial size — a shape-probing helper."""
    batch, in_channel, out_channel = 2, 5, 3
    shape = (batch, in_channel, in_size, in_size, in_size)
    data = np.arange(batch * in_channel * in_size**3, dtype=np.float32).reshape(shape)
    layer = L.DeconvolutionND(3, in_channel, out_channel, kernel, stride, padding)
    return layer(data).shape
def __init__(self, label):
    """Small 3D convolutional auto-encoder (4 convs, 2 deconvs, 5 BN layers).
    The ``label`` argument is accepted but not used here."""
    super(AutoEncoder, self).__init__()
    with self.init_scope():
        # too complesed?  (kept from the original author's note)
        self.conv1 = L.ConvolutionND(ndim=3, in_channels=4, out_channels=5, ksize=3, pad=1)
        self.conv2 = L.ConvolutionND(ndim=3, in_channels=5, out_channels=5, ksize=3, pad=1)
        self.conv3 = L.ConvolutionND(ndim=3, in_channels=5, out_channels=5, ksize=3, pad=1)
        self.conv4 = L.ConvolutionND(ndim=3, in_channels=5, out_channels=5, ksize=3, pad=1)
        self.dconv1 = L.DeconvolutionND(ndim=3, in_channels=5, out_channels=5, ksize=2, stride=2, pad=1)
        # NOTE(review): dconv2 takes 4 input channels while dconv1 outputs 5 —
        # looks inconsistent; confirm against the forward pass before changing.
        self.dconv2 = L.DeconvolutionND(ndim=3, in_channels=4, out_channels=5, ksize=2, stride=2, pad=1)
        self.bn0 = L.BatchNormalization(5)
        self.bn1 = L.BatchNormalization(5)
        self.bn2 = L.BatchNormalization(5)
        self.bn3 = L.BatchNormalization(5)
        self.bn4 = L.BatchNormalization(5)
def __init__(self):
    """Symmetric 3D conv/deconv auto-encoder with anisotropic kernels; each
    deconv mirrors the matching conv's kernel and stride."""
    super().__init__()
    with self.init_scope():
        # Encoder: (ndim, in_ch, out_ch, ksize, stride, pad).
        self.conv1 = L.ConvolutionND(3, 1, 16, (3, 5, 3), 2, 0)
        self.bnc1 = L.BatchNormalization(16)
        self.conv2 = L.ConvolutionND(3, 16, 32, (5, 7, 5), 2, 0)
        self.bnc2 = L.BatchNormalization(32)
        self.conv3 = L.ConvolutionND(3, 32, 64, (7, 8, 7), 2, 0)
        self.bnc3 = L.BatchNormalization(64)
        self.conv4 = L.ConvolutionND(3, 64, 128, (8, 9, 8), 1, 0)
        self.bnc4 = L.BatchNormalization(128)
        # Decoder mirrors the encoder in reverse order.
        self.deconv4 = L.DeconvolutionND(3, 128, 64, (8, 9, 8), 1, 0)
        self.bnd4 = L.BatchNormalization(64)
        self.deconv3 = L.DeconvolutionND(3, 64, 32, (7, 8, 7), 2, 0)
        self.bnd3 = L.BatchNormalization(32)
        self.deconv2 = L.DeconvolutionND(3, 32, 16, (5, 7, 5), 2, 0)
        self.bnd2 = L.BatchNormalization(16)
        self.deconv1 = L.DeconvolutionND(3, 16, 1, (3, 5, 3), 2, 0)
def __init__(self, video_len=32):
    """Video generator: a linear projection of a 100-dim latent followed by 3D
    deconvolutions producing foreground flow (2 ch) and a mask (1 ch).

    Args:
        video_len: number of frames; the latent projection allocates
            ``video_len // 16`` temporal units at the coarsest resolution.
    """
    # used as the seed for the weights initialization
    w = chainer.initializers.Normal(0.02)
    self.video_len = video_len
    # Basically define all the layers of the generator.
    super(Generator, self).__init__(
        l0=L.Linear(100, 4 * 4 * 512 * (self.video_len // 16), initialW=w),
        dc1=L.DeconvolutionND(3, 512, 256, 4, 2, 1, initialW=w),
        dc2=L.DeconvolutionND(3, 256, 128, 4, 2, 1, initialW=w),
        dc3=L.DeconvolutionND(3, 128, 64, 4, 2, 1, initialW=w),
        dc_fore=L.DeconvolutionND(3, 64, 2, 4, 2, 1, initialW=w),
        dc_mask=L.DeconvolutionND(3, 64, 1, 4, 2, 1, initialW=w),
        # BUG FIX: bn0's size used float division (video_len / 16) while l0
        # used integer division; under Python 3 that passes a float where
        # BatchNormalization expects an int size. Use // to match l0.
        bn0=L.BatchNormalization(4 * 4 * 512 * (self.video_len // 16)),
        bn1=L.BatchNormalization(256),
        bn2=L.BatchNormalization(128),
        bn3=L.BatchNormalization(64),
    )
def __init__(self, nb_inputs, channel_list, ksize_list, no_act_last=False):
    """1-D deconvolution decoder.

    Builds one DeconvolutionND(1) per entry of ``channel_list`` (ending back at
    ``nb_inputs`` channels, kernel sizes taken from ``ksize_list`` reversed),
    each followed by BatchNormalization — except the last layer when
    ``no_act_last`` is set.
    """
    super(Decoder, self).__init__()
    self.nb_layers = len(channel_list)
    self.no_act_last = no_act_last
    # Append the target channel count so consecutive pairs give (in, out).
    extended = channel_list + [nb_inputs]
    for idx, (nb_in, nb_out, ksize) in enumerate(
            zip(extended[:-1], extended[1:], ksize_list[::-1])):
        self.add_link("deconv{}".format(idx),
                      L.DeconvolutionND(1, nb_in, nb_out, ksize))
        # Optionally keep the final layer linear (no BN).
        if no_act_last and idx == self.nb_layers - 1:
            continue
        self.add_link("bn{}".format(idx), L.BatchNormalization(nb_out))
def __init__(self, f, c, fd):
    """Spectrogram super-resolution network (SSRN): highway-conv blocks with
    two stride-2 1-D deconvolutions for 4x temporal upsampling, widening from
    ``c`` to ``2*c`` channels and projecting to ``fd`` output bins."""
    he = chainer.initializers.HeNormal()
    super(SSRN, self).__init__(
        l1=Conv(f, c, 1, 1, False),
        l2=Highway(c, 3, 1, False),
        l3=Highway(c, 3, 3, False),
        # First 2x upsampling.
        l4=L.DeconvolutionND(1, c, c, 2, 2, initialW=he),
        l5=Highway(c, 3, 1, False),
        l6=Highway(c, 3, 3, False),
        # Second 2x upsampling.
        l7=L.DeconvolutionND(1, c, c, 2, 2, initialW=he),
        l8=Highway(c, 3, 1, False),
        l9=Highway(c, 3, 3, False),
        l10=Conv(c, 2*c, 1, 1, False),
        l11=Highway(2*c, 3, 1, False),
        l12=Highway(2*c, 3, 1, False),
        # Projection to the output feature dimension.
        l13=Conv(2*c, fd, 1, 1, False),
        l14=Conv(fd, fd, 1, 1, False),
        l15=Conv(fd, fd, 1, 1, False),
        l16=Conv(fd, fd, 1, 1, False),
    )
def __init__(self, in_ch, out_ch, up=False, down=False, activation=F.relu):
    """3D conv block for the discriminator; registers same-resolution,
    downsampling and upsampling variants, selected at call time via
    ``up``/``down``."""
    w = initializers.Normal(0.01)
    self.up = up
    self.down = down
    self.activation = activation
    super(CBR3D_dis, self).__init__()
    with self.init_scope():
        # Same-resolution 3x3x3 conv.
        self.cpara = L.ConvolutionND(3, in_ch, out_ch, 3, 1, 1, initialW=w)
        # Stride-2 downsampling conv.
        self.cdown = L.ConvolutionND(3, in_ch, out_ch, 4, (2, 2, 2), 1, initialW=w)
        # Stride-2 upsampling deconv.
        self.cup = L.DeconvolutionND(3, in_ch, out_ch, 4, (2, 2, 2), 1, initialW=w)
        self.bn0 = L.BatchNormalization(out_ch)
def __init__(self):
    """Two-stream video generator: a 3D deconvolution foreground stream with a
    mask head, and a 2D deconvolution background stream."""
    super(Generator, self).__init__()
    with self.init_scope():
        w = chainer.initializers.Normal(0.01)
        # --- foreground (3D) stream; latent is treated as 100 channels ---
        self.fg_dc0 = L.DeconvolutionND(3, 100, 512, (2, 4, 4), initialW=w)
        self.fg_dc1 = L.DeconvolutionND(3, 512, 256, 4, 2, 1, initialW=w)
        self.fg_dc2 = L.DeconvolutionND(3, 256, 128, 4, 2, 1, initialW=w)
        self.fg_dc3 = L.DeconvolutionND(3, 128, 64, 4, 2, 1, initialW=w)
        self.fg_dc4 = L.DeconvolutionND(3, 64, 3, 4, 2, 1, initialW=w)
        # Mask head shares the fg feature trunk.
        self.m_dc4 = L.DeconvolutionND(3, 64, 1, 4, 2, 1, initialW=w)
        self.fg_bn0 = L.BatchNormalization(512)
        self.fg_bn1 = L.BatchNormalization(256)
        self.fg_bn2 = L.BatchNormalization(128)
        self.fg_bn3 = L.BatchNormalization(64)
        # --- background (2D) stream ---
        self.bg_dc0 = L.Deconvolution2D(100, 512, 4, initialW=w)
        self.bg_dc1 = L.Deconvolution2D(512, 256, 4, 2, 1, initialW=w)
        self.bg_dc2 = L.Deconvolution2D(256, 128, 4, 2, 1, initialW=w)
        self.bg_dc3 = L.Deconvolution2D(128, 64, 4, 2, 1, initialW=w)
        self.bg_dc4 = L.Deconvolution2D(64, 3, 4, 2, 1, initialW=w)
        self.bg_bn0 = L.BatchNormalization(512)
        self.bg_bn1 = L.BatchNormalization(256)
        self.bg_bn2 = L.BatchNormalization(128)
        self.bg_bn3 = L.BatchNormalization(64)
def __init__(self, train=True):
    """2D-to-3D auto-encoder: a 2D conv encoder (c0-c8) and a 3D deconv
    decoder (dc00-dc7) whose doubled input channels (1024, 512, 256, 128)
    take skip concatenations from the encoder."""
    w = chainer.initializers.Normal(0.02)
    super(CNNAE3D512, self).__init__(
        # ----- 2D encoder, stride-2 at every stage -----
        c0=L.Convolution2D(3, 64, 4, stride=2, pad=1, initialW=w),
        c1=L.Convolution2D(64, 128, 4, stride=2, pad=1, initialW=w),
        c2=L.Convolution2D(128, 256, 4, stride=2, pad=1, initialW=w),
        c3=L.Convolution2D(256, 512, 4, stride=2, pad=1, initialW=w),
        c4=L.Convolution2D(512, 512, 4, stride=2, pad=1, initialW=w),
        c5=L.Convolution2D(512, 512, 4, stride=2, pad=1, initialW=w),
        c6=L.Convolution2D(512, 512, 4, stride=2, pad=1, initialW=w),
        c7=L.Convolution2D(512, 512, 4, stride=2, pad=1, initialW=w),
        c8=L.Convolution2D(512, 512, 4, stride=2, pad=1, initialW=w),
        # ----- 3D decoder; early stages upsample all axes, later stages
        # only the spatial axes (stride (1,2,2)) -----
        dc00=L.DeconvolutionND(3, 512, 512, (4, 4, 4), stride=(2, 2, 2), pad=1, initialW=w),
        dc0=L.DeconvolutionND(3, 1024, 512, (4, 4, 4), stride=(2, 2, 2), pad=1, initialW=w),
        dc1=L.DeconvolutionND(3, 1024, 512, (4, 4, 4), stride=(2, 2, 2), pad=1, initialW=w),
        dc2=L.DeconvolutionND(3, 1024, 512, (3, 4, 4), stride=(1, 2, 2), pad=1, initialW=w),
        dc3=L.DeconvolutionND(3, 1024, 512, (3, 4, 4), stride=(1, 2, 2), pad=1, initialW=w),
        dc4=L.DeconvolutionND(3, 1024, 256, (3, 4, 4), stride=(1, 2, 2), pad=1, initialW=w),
        dc5=L.DeconvolutionND(3, 512, 128, (3, 4, 4), stride=(1, 2, 2), pad=1, initialW=w),
        dc6=L.DeconvolutionND(3, 256, 64, (3, 4, 4), stride=(1, 2, 2), pad=1, initialW=w),
        dc7=L.DeconvolutionND(3, 128, 3, (3, 4, 4), stride=(1, 2, 2), pad=1, initialW=w),
        # ----- batch normalization (encoder then decoder) -----
        bnc1=L.BatchNormalization(128),
        bnc2=L.BatchNormalization(256),
        bnc3=L.BatchNormalization(512),
        bnc4=L.BatchNormalization(512),
        bnc5=L.BatchNormalization(512),
        bnc6=L.BatchNormalization(512),
        bnc7=L.BatchNormalization(512),
        bnc8=L.BatchNormalization(512),
        bndc00=L.BatchNormalization(512),
        bndc0=L.BatchNormalization(512),
        bndc1=L.BatchNormalization(512),
        bndc2=L.BatchNormalization(512),
        bndc3=L.BatchNormalization(512),
        bndc4=L.BatchNormalization(256),
        bndc5=L.BatchNormalization(128),
        bndc6=L.BatchNormalization(64)
    )
    self.train = train
    self.train_dropout = train
def __init__(self, n_frames=16, z_slow_dim=256, z_fast_dim=256, n_classes=0):
    """Temporal generator: 1-D deconvolutions expanding a slow latent into 16
    per-frame fast latents. Only n_frames=16 and the unconditional case
    (n_classes=0) are supported (asserted below)."""
    super(TemporalGenerator, self).__init__()
    w = None  # keep Chainer's default weight initializer
    assert (n_frames == 16)
    assert (n_classes == 0)
    self.n_frames = n_frames
    with self.init_scope():
        # 1x deconv expands the latent, then four stride-2 stages: 1 -> 16 frames.
        self.dc0 = L.DeconvolutionND(1, z_slow_dim, 512, 1, 1, 0, initialW=w)
        self.dc1 = L.DeconvolutionND(1, 512, 256, 4, 2, 1, initialW=w)
        self.dc2 = L.DeconvolutionND(1, 256, 128, 4, 2, 1, initialW=w)
        self.dc3 = L.DeconvolutionND(1, 128, 128, 4, 2, 1, initialW=w)
        self.dc4 = L.DeconvolutionND(1, 128, z_fast_dim, 4, 2, 1, initialW=w)
        self.bn0 = make_batch_normalization(512, n_classes=0)
        self.bn1 = make_batch_normalization(256, n_classes=0)
        self.bn2 = make_batch_normalization(128, n_classes=0)
        self.bn3 = make_batch_normalization(128, n_classes=0)
    self.z_slow_dim = z_slow_dim
    self.z_fast_dim = z_fast_dim
def __init__(self, n_dims, in_ch, out_ch, ksize=4, stride=2, bn=True, sample='down', activation=F.relu, dropout='dropout'):
    """Conv/Deconv + optional BatchNormalization block.

    Args:
        n_dims: spatial dimensionality for ConvolutionND/DeconvolutionND.
        in_ch, out_ch: input/output channel counts.
        ksize, stride: kernel size and stride (padding is fixed to 1).
        bn: whether to attach a BatchNormalization link.
        sample: 'down' (conv), 'up' (deconv) or 'up_shuffle' (pixel shuffle).
        dropout: key into the CBR.dropout registry; keys not in the registry
            disable dropout.

    Raises:
        KeyError: if ``sample`` is not one of the supported modes.
    """
    self.use_bn = bn
    self.activation = activation
    # BUG FIX: the original expression was inverted
    # (`None if dropout in CBR.dropout else CBR.dropout[dropout]`), which
    # disabled dropout for every registered key and raised KeyError for any
    # unregistered one. Look up the key when present, otherwise use None.
    self.dropout = CBR.dropout[dropout] if dropout in CBR.dropout else None
    w = chainer.initializers.Normal(0.02)
    super().__init__()
    with self.init_scope():
        if sample == 'down':
            self.c = L.ConvolutionND(n_dims, in_ch, out_ch, ksize=ksize, stride=stride, pad=1, initialW=w)
        elif sample == 'up':
            self.c = L.DeconvolutionND(n_dims, in_ch, out_ch, ksize=ksize, stride=stride, pad=1, initialW=w)
        elif sample == 'up_shuffle':
            self.c = PixelShuffleUpsampler(n_dims, in_ch, out_ch, 2, ksize=ksize, stride=stride)
        else:
            raise KeyError('Unknown sampling type:' + sample)
        if bn:
            self.bn = L.BatchNormalization(out_ch)
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.leaky_relu, add_noise=False, sigma=0.2):
    """3D Conv-BN-activation block; ``sample`` selects stride-2 conv ('down'),
    stride-2 deconv ('up') or same-resolution conv ('same'). Optional Gaussian
    noise parameters are stored for use in the forward pass."""
    self.bn = bn
    self.activation = activation
    self.add_noise = add_noise
    self.sigma = sigma
    self.iteration = 0
    w = chainer.initializers.Normal(0.02)
    layers = {}
    if sample == 'down':
        layers['c'] = L.ConvolutionND(3, ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'up':
        layers['c'] = L.DeconvolutionND(3, ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == 'same':
        layers['c'] = L.ConvolutionND(3, ch0, ch1, 3, 1, 1, initialW=w)
    if bn:
        layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR3D, self).__init__(**layers)
def __init__(self, n_voc, n_hidden):
    """Sequence generator: a linear seed layer, a 1-D deconvolution, four
    upsampling GenBlocks, two DisBlocks, and a 1x1 conv decoder over the
    vocabulary. ``initializer`` is created but not wired into the layers
    (matching the original)."""
    super(Generator, self).__init__()
    self.n_voc = n_voc
    self.n_hidden = n_hidden
    initializer = chainer.initializers.HeNormal()
    with self.init_scope():
        # Seed projection (in_size inferred lazily by Chainer).
        self.l = L.Linear(16 * 16)
        self.deconv = L.DeconvolutionND(1, 16, 32, 7, stride=2, pad=2)
        # Upsampling trunk.
        self.block1 = GenBlock(32, 32, 4, 2, 1)
        self.block2 = GenBlock(32, 64, 4, 2, 1)
        self.block3 = GenBlock(64, 64, 4, 2, 1)
        self.block4 = GenBlock(64, 128, 4, 2, 1)
        # Refinement blocks.
        self.block5 = DisBlock(128, 128, 5, 2, 0)
        self.block6 = DisBlock(128, 256, 3, 1, 1)
        self.bn = L.BatchNormalization(256)
        # Per-position vocabulary logits.
        self.decoder = L.ConvolutionND(1, 256, n_voc, ksize=1, stride=1)
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False):
    """Conv-BN-activation block: 2D stride-2 conv for 'down', 3D stride-2 conv
    for 'dis', and a 3D stride-2 deconvolution for any other ``sample`` value."""
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    w = chainer.initializers.Normal(0.02)
    layers = {}
    if sample == 'down':
        layers['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=w)
    elif sample == "dis":
        layers['c'] = L.ConvolutionND(3, ch0, ch1, 4, 2, 1, initialW=w)
    else:
        # Default ('up' and anything else): 3D upsampling deconvolution.
        layers['c'] = L.DeconvolutionND(3, ch0, ch1, 4, 2, 1, initialW=w)
    if bn:
        layers['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**layers)
def __init__(self, n_frames=16, z_slow_dim=256, out_channels=3):
    """3D video generator: a 1x deconv expands the latent, two stages upsample
    only the spatial axes (stride (1,2,2)), then four stages upsample all axes."""
    self.n_frames = n_frames
    self.z_slow_dim = z_slow_dim
    self.out_channels = out_channels
    super(GeneratorConv3D, self).__init__()
    w = None  # keep Chainer's default weight initializer
    with self.init_scope():
        self.dc0 = L.DeconvolutionND(3, z_slow_dim, 512, 1, 1, 0, initialW=w)
        # Spatial-only upsampling (temporal stride 1).
        self.dc1 = L.DeconvolutionND(3, 512, 256, (1, 4, 4), (1, 2, 2), (0, 1, 1), initialW=w)
        self.dc2 = L.DeconvolutionND(3, 256, 128, (1, 4, 4), (1, 2, 2), (0, 1, 1), initialW=w)
        # Full spatio-temporal upsampling.
        self.dc3 = L.DeconvolutionND(3, 128, 128, 4, 2, 1, initialW=w)
        self.dc4 = L.DeconvolutionND(3, 128, 64, 4, 2, 1, initialW=w)
        self.dc5 = L.DeconvolutionND(3, 64, 64, 4, 2, 1, initialW=w)
        self.dc6 = L.DeconvolutionND(3, 64, out_channels, 4, 2, 1, initialW=w)
        self.bn0 = make_batch_normalization(512)
        self.bn1 = make_batch_normalization(256)
        self.bn2 = make_batch_normalization(128)
        self.bn3 = make_batch_normalization(128)
        self.bn4 = make_batch_normalization(64)
        self.bn5 = make_batch_normalization(64)
def __init__(self, in_channel, out_channel, n_dims=2, kernel_size=3, n_layers=5, n_filters=64, is_bayesian=False, is_residual=False, initialW=initializers.HeNormal(), initial_bias=None, batch_norm=False, block_type='default', **kwargs):
    # Build a configurable 2D/3D U-Net: n_layers down blocks, n_layers-1
    # deconv + up blocks, and an output conv attached at the finest level.
    # NOTE(review): initialW=initializers.HeNormal() is a shared mutable
    # default argument — every instance created without an explicit
    # initializer shares the same initializer object; confirm this is intended.
    self.n_dims = n_dims
    if self.n_dims != 2 and self.n_dims != 3:
        warnings.warn('unsupported number of input dimensions.')
    self.in_channel = in_channel
    self.out_channel = out_channel
    self.n_layers = n_layers
    self.n_filters = n_filters
    self.kernel_size = kernel_size
    self.is_bayesian = is_bayesian
    self.is_residual = is_residual
    self.initialW = initialW
    self.initial_bias = initial_bias
    self.block_type = block_type
    self.batch_norm = batch_norm
    chainer.Chain.__init__(self)
    with self.init_scope():
        # down convolution
        # Block i doubles the filter count: n_filters * 2**(i-1); block 1
        # takes the raw input channels, later blocks take the previous width.
        for i in range(1, self.n_layers + 1):
            if i == 1:
                setattr(
                    self, 'down_unet_block_%d' % i,
                    UNetBlock(self.n_dims, self.in_channel, self.n_filters * (2**(i - 1)), self.n_filters * (2**(i - 1)), self.kernel_size, initialW=initialW, initial_bias=initial_bias, is_residual=self.is_residual, block_type=block_type, batch_norm=batch_norm))
            else:
                setattr(
                    self, 'down_unet_block_%d' % i,
                    UNetBlock(self.n_dims, self.n_filters * (2**(i - 2)), self.n_filters * (2**(i - 1)), self.n_filters * (2**(i - 1)), self.kernel_size, initialW=initialW, initial_bias=initial_bias, is_residual=self.is_residual, block_type=block_type, batch_norm=batch_norm))
        # up convolution
        # Each level i gets a stride-2 deconv from the deeper block's width,
        # then a UNetBlock over [skip features ++ deconv output].
        for i in range(1, self.n_layers):
            deconv_n_filters = self['down_unet_block_%d' % (i + 1)].out_channel
            setattr(
                self, 'deconv_%d' % i,
                L.DeconvolutionND(self.n_dims, deconv_n_filters, deconv_n_filters, self.kernel_size, stride=2, pad=0, initialW=initialW, initial_bias=initial_bias))
            if self.batch_norm:
                setattr(self, 'bn_deconv_%d' % i, L.BatchNormalization(deconv_n_filters))
            # Concatenated width: skip-path channels + deconv output channels
            # (W.shape[1] is the deconv's output-channel axis).
            upconv_n_filters = self['down_unet_block_%d' % i].out_channel + self['deconv_%d' % i].W.shape[1]
            setattr(
                self, 'up_unet_block_%d' % i,
                UNetBlock(self.n_dims, upconv_n_filters, self.n_filters * (2**(i - 1)), self.n_filters * (2**(i - 1)), self.kernel_size, initialW=initialW, initial_bias=initial_bias, is_residual=self.is_residual, block_type=block_type, batch_norm=batch_norm))
            if i == 1:
                # output layer
                setattr(
                    self, 'up_conv%d_3' % i,
                    L.ConvolutionND(self.n_dims, self.n_filters * (2**(i - 1)), self.out_channel, self.kernel_size, stride=1, pad=1, initialW=initialW, initial_bias=initial_bias))
        # initialize weights for deconv layer
        # Overwrite the deconv kernels with a fixed bilinear/trilinear
        # upsampling filter on the channel diagonal (all other weights zero).
        for i in range(1, self.n_layers):
            deconv_k_size = self['deconv_%d' % i].W.shape[-1]
            deconv_n_filters = self['deconv_%d' % i].W.shape[1]
            self['deconv_%d' % i].W.data[...] = 0
            if self.n_dims == 2:
                filt = get_upsampling_filter_2d(deconv_k_size)
                self['deconv_%d' % i].W.data[range(deconv_n_filters), range(deconv_n_filters), :, :] = filt
            elif self.n_dims == 3:
                filt = get_upsampling_filter_3d(deconv_k_size)
                self['deconv_%d' % i].W.data[range(deconv_n_filters), range(deconv_n_filters), :, :, :] = filt
def __init__(self, label):
    """Compact 3D U-Net (8/16/32/64 channels): three encoder stages and two
    deconvolution upsampling stages with skip concatenation, ending in a
    1x1x1 classification layer."""
    super(UNet3D, self).__init__()
    with self.init_scope():
        # ----- encoder path -----
        self.conv1 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=8, ksize=3, pad=1)
        self.bnc0 = L.BatchNormalization(8)
        self.conv2 = L.ConvolutionND(ndim=3, in_channels=8, out_channels=16, ksize=3, pad=1)
        self.bnc1 = L.BatchNormalization(16)
        self.conv3 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=16, ksize=3, pad=1)
        self.bnc2 = L.BatchNormalization(16)
        self.conv4 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=1)
        self.bnc3 = L.BatchNormalization(32)
        self.conv5 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1)
        self.bnc4 = L.BatchNormalization(32)
        self.conv6 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3, pad=1)
        self.bnc5 = L.BatchNormalization(64)
        # ----- decoder path (channel sums take the skip concatenation) -----
        self.dconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2)
        self.conv7 = L.ConvolutionND(ndim=3, in_channels=32 + 64, out_channels=32, ksize=3, pad=1)
        self.bnd4 = L.BatchNormalization(32)
        self.conv8 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=1)
        self.bnd3 = L.BatchNormalization(32)
        self.dconv2 = L.DeconvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=2, stride=2)
        self.conv9 = L.ConvolutionND(ndim=3, in_channels=16 + 32, out_channels=16, ksize=3, pad=1)
        self.bnd2 = L.BatchNormalization(16)
        self.conv10 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=16, ksize=3, pad=1)
        self.bnd1 = L.BatchNormalization(16)
        # Final per-voxel classification layer.
        self.lcl = L.ConvolutionND(ndim=3, in_channels=16, out_channels=label, ksize=1, pad=0)