def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.relu, dropout=False) -> None:
    """Conv unit choosing its resampling op from *sample*.

    'down' -> stride-2 convolution, 'up' -> stride-2 deconvolution,
    anything else -> 1x1 convolution (no resampling).
    """
    super().__init__()
    self.bn = bn
    self.activation = activation
    self.dropout = dropout
    init_w = chainer.initializers.Normal(0.02)
    with self.init_scope():
        if sample == 'up':
            self.c = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=init_w)
        elif sample == 'down':
            self.c = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=init_w)
        else:
            self.c = L.Convolution2D(ch0, ch1, 1, 1, 0, initialW=init_w)
        if bn:
            self.batchnorm = L.BatchNormalization(ch1)
def setUp(self):
    """Build the deconvolution link under test plus matching input/grad arrays."""
    link_args, link_kwargs = self.deconv_args
    link_kwargs['nobias'] = self.nobias
    self.link = L.Deconvolution2D(*link_args, **link_kwargs)
    if not self.nobias:
        # Randomize the bias so gradient checks are non-trivial.
        self.link.b.data[...] = numpy.random.uniform(
            -1, 1, self.link.b.data.shape).astype(numpy.float32)
    n_out = self.link.out_channels
    kernel = self.link.ksize
    stride = self.link.stride[0]
    pad = self.link.pad[0]
    batch = 2
    in_h, in_w = 3, 2
    kh, kw = _pair(kernel)
    out_h = conv.get_deconv_outsize(in_h, kh, stride, pad)
    out_w = conv.get_deconv_outsize(in_w, kw, stride, pad)
    self.gy = numpy.random.uniform(
        -1, 1, (batch, n_out, out_h, out_w)).astype(numpy.float32)
    self.x = numpy.random.uniform(
        -1, 1, (batch, 3, in_h, in_w)).astype(numpy.float32)
    # Forward once so any lazily-initialized parameters exist, then reset grads.
    self.link(chainer.Variable(self.x))
    self.link.cleargrads()
def __init__(self, n_class=21):
    """FCN-32s semantic-segmentation network (VGG16 layout, 32x upscore).

    All conv weights/biases start at zero; pretrained VGG weights are
    expected to be copied in afterwards. The upscore deconvolution uses a
    fixed bilinear-style upsampling initializer and no bias.
    """
    self.n_class = n_class
    zero_init = {
        'initialW': chainer.initializers.Zero(),
        'initial_bias': chainer.initializers.Zero(),
    }
    super(FCN32s, self).__init__()
    with self.init_scope():
        # pad=100 on the first conv is the standard FCN trick so that
        # arbitrary input sizes survive the 32x downsampling.
        self.conv1_1 = L.Convolution2D(3, 64, 3, 1, 100, **zero_init)
        self.conv1_2 = L.Convolution2D(64, 64, 3, 1, 1, **zero_init)
        self.conv2_1 = L.Convolution2D(64, 128, 3, 1, 1, **zero_init)
        self.conv2_2 = L.Convolution2D(128, 128, 3, 1, 1, **zero_init)
        self.conv3_1 = L.Convolution2D(128, 256, 3, 1, 1, **zero_init)
        self.conv3_2 = L.Convolution2D(256, 256, 3, 1, 1, **zero_init)
        self.conv3_3 = L.Convolution2D(256, 256, 3, 1, 1, **zero_init)
        self.conv4_1 = L.Convolution2D(256, 512, 3, 1, 1, **zero_init)
        self.conv4_2 = L.Convolution2D(512, 512, 3, 1, 1, **zero_init)
        self.conv4_3 = L.Convolution2D(512, 512, 3, 1, 1, **zero_init)
        self.conv5_1 = L.Convolution2D(512, 512, 3, 1, 1, **zero_init)
        self.conv5_2 = L.Convolution2D(512, 512, 3, 1, 1, **zero_init)
        self.conv5_3 = L.Convolution2D(512, 512, 3, 1, 1, **zero_init)
        # Fully-connected layers recast as convolutions.
        self.fc6 = L.Convolution2D(512, 4096, 7, 1, 0, **zero_init)
        self.fc7 = L.Convolution2D(4096, 4096, 1, 1, 0, **zero_init)
        self.score_fr = L.Convolution2D(4096, n_class, 1, 1, 0, **zero_init)
        self.upscore = L.Deconvolution2D(
            n_class, n_class, 64, 32, 0, nobias=True,
            initialW=initializers.UpsamplingDeconvWeight())
def __init__(self, in_ch, out_ch, sample):
    """BatchNorm + resampling conv: 'down' halves, 'up' doubles, else 1x1."""
    super().__init__()
    with self.init_scope():
        self.bn = L.BatchNormalization(in_ch)
        if sample == 'up':
            self.c = L.Deconvolution2D(in_ch, out_ch, ksize=4, stride=2, pad=1)
        elif sample == 'down':
            self.c = L.Convolution2D(in_ch, out_ch, ksize=4, stride=2, pad=1)
        else:
            self.c = L.Convolution2D(in_ch, out_ch, ksize=1, stride=1, pad=0)
def __init__(self, n_first, n_latent):
    """DCGAN-style decoder: latent vector -> six stride-2 deconvs -> 3-channel image.

    Channel widths shrink 16x -> 1x of *n_first*, then to 3 output channels.
    """
    super(Decoder, self).__init__()
    self.n_first = n_first
    with self.init_scope():
        self.d_l0 = L.Linear(n_latent, n_first * 16 * 4)
        # Each (ksize=4, stride=2, pad=1) deconvolution doubles H and W.
        self.d_dc0 = L.Deconvolution2D(n_first * 16, n_first * 16, ksize=4, stride=2, pad=1)
        self.d_dc1 = L.Deconvolution2D(n_first * 16, n_first * 8, ksize=4, stride=2, pad=1)
        self.d_dc2 = L.Deconvolution2D(n_first * 8, n_first * 4, ksize=4, stride=2, pad=1)
        self.d_dc3 = L.Deconvolution2D(n_first * 4, n_first * 2, ksize=4, stride=2, pad=1)
        self.d_dc4 = L.Deconvolution2D(n_first * 2, n_first, ksize=4, stride=2, pad=1)
        self.d_dc5 = L.Deconvolution2D(n_first, 3, ksize=4, stride=2, pad=1)
        # Batch norms track each deconv's output width; gamma is fixed at 1.
        self.d_bn0 = L.BatchNormalization(n_first * 16, use_gamma=False)
        self.d_bn1 = L.BatchNormalization(n_first * 8, use_gamma=False)
        self.d_bn2 = L.BatchNormalization(n_first * 4, use_gamma=False)
        self.d_bn3 = L.BatchNormalization(n_first * 2, use_gamma=False)
        self.d_bn4 = L.BatchNormalization(n_first, use_gamma=False)
        self.d_bn5 = L.BatchNormalization(3, use_gamma=False)
def __init__(self):
    """Define the generator layers.

    NOTE(review): ``chs`` is read from module scope rather than taken as a
    parameter, although the original docstring described ``chs`` (residual
    block channels) and ``layres`` (block count) as parameters — confirm
    the intended configuration mechanism.

    Every conv/deconv is He-initialized and wrapped with spectral
    normalization via ``spn()``; ``add_hook`` returns the link itself.
    """
    super(Generator, self).__init__()
    he_w = chainer.initializers.HeNormal()
    with self.init_scope():
        # Encoder: collapse the 1025 frequency bins with a (4, 1) strided conv.
        self.e = L.Convolution2D(1025, chs, (4, 1), stride=(4, 1),
                                 initialW=he_w).add_hook(spn())
        # Four pairs of (5, 1) convolutions forming the residual body.
        self.c11 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c12 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c21 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c22 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c31 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c32 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c41 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        self.c42 = L.Convolution2D(chs, chs, (5, 1), initialW=he_w).add_hook(spn())
        # Decoder: restore the 1025 bins with a (4, 1) strided deconvolution.
        self.d = L.Deconvolution2D(chs, 1025, (4, 1), stride=(4, 1),
                                   initialW=he_w).add_hook(spn())
def __init__(self, n_class=4):
    """FCN-style segmentation network with *n_class* output classes.

    Uses the Chainer v1 keyword-argument form of ``Chain.__init__`` to
    register the links.
    """
    self.n_class = n_class
    # BUG FIX: the original used ``super(self.__class__, self).__init__``,
    # which recurses infinitely as soon as this class is subclassed
    # (``self.__class__`` then names the subclass, not the defining class).
    # Zero-argument super() — already used elsewhere in this file — binds
    # to the defining class correctly.
    super().__init__(
        conv1_1=L.Convolution2D(3, 64, 3, stride=1, pad=100),
        conv1_2=L.Convolution2D(64, 64, 3, stride=1, pad=1),
        conv2_1=L.Convolution2D(64, 128, 3, stride=1, pad=1),
        conv2_2=L.Convolution2D(128, 128, 3, stride=1, pad=1),
        conv3_1=L.Convolution2D(128, 256, 3, stride=1, pad=1),
        conv3_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv3_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv5_1=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv5_2=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        conv5_3=L.Convolution2D(256, 256, 3, stride=1, pad=1),
        # Fully-connected layers recast as convolutions.
        fc6=L.Convolution2D(256, 512, 7, stride=1, pad=0),
        fc7=L.Convolution2D(512, 512, 1, stride=1, pad=0),
        score_fr=L.Convolution2D(512, self.n_class, 1, stride=1, pad=0),
        upscore=L.Deconvolution2D(self.n_class, self.n_class, 64,
                                  stride=32, pad=0),
    )
    self.train = False
def __init__(self, in_channels, out_channels, LoopTimes=5):
    """Recurrent feedforward/feedback block, iterated *LoopTimes* times.

    ffconv : feedforward convolution.
    fbconv : feedback convolution (deconvolution).
    bpconv : bypass convolution (1x1 convolution).
    update_rate : a learnable and non-negative parameter.
    """
    super(Block, self).__init__()
    with self.init_scope():
        self.bn = L.BatchNormalization(in_channels)
        self.ffconv = L.Convolution2D(in_channels, out_channels, ksize=3, pad=1)
        self.fbconv = L.Deconvolution2D(out_channels, in_channels, ksize=3, pad=1)
        self.bpconv = L.Convolution2D(in_channels, out_channels, ksize=1)
        self.update_rate = L.Scale(axis=0, W_shape=(1, ))
        self.LoopTimes = LoopTimes
        self.out_channels = out_channels
def __init__(self, input_ch, out_ch, ksize=None, stride=1, pad=0,
             nobias=False, initialW=None, initial_bias=None, outsize=None):
    """Deconvolution link with spectral normalization applied to its weight."""
    super().__init__()
    with self.init_scope():
        self.c = L.Deconvolution2D(
            input_ch, out_ch, ksize=ksize, stride=stride, pad=pad,
            nobias=nobias, initialW=initialW, initial_bias=initial_bias,
            outsize=outsize)
        # Constrain the weight's spectral norm (common GAN stabilization).
        self.c.add_hook(SpectralNormalization())
def __init__(self, n_class, roi_size, spatial_scale,
             vgg_initialW=None, loc_initialW=None, score_initialW=None,
             mask_initialW=None, pooling_func=functions.roi_align_2d):
    """VGG16 RoI head with classification, box-regression and mask branches.

    ``n_class`` includes the background class.
    """
    super(VGG16RoIHead, self).__init__()
    with self.init_scope():
        # 512 channels x roi_size x roi_size, flattened (25088 for 7x7 RoIs).
        self.fc6 = L.Linear(512 * roi_size * roi_size, 4096,
                            initialW=vgg_initialW)
        self.fc7 = L.Linear(4096, 4096, initialW=vgg_initialW)
        self.cls_loc = L.Linear(4096, n_class * 4, initialW=loc_initialW)
        self.score = L.Linear(4096, n_class, initialW=score_initialW)
        # Mask branch: 7x7x512 -> 14x14x256 ...
        self.deconv6 = L.Deconvolution2D(512, 256, 2, stride=2,
                                         initialW=mask_initialW)
        # ... -> 14x14 per-foreground-class masks.
        n_fg_class = n_class - 1
        self.mask = L.Convolution2D(256, n_fg_class, 1,
                                    initialW=mask_initialW)
    self.n_class = n_class
    self.roi_size = roi_size
    self.spatial_scale = spatial_scale
    self._pooling_func = pooling_func
def __init__(self, noise=128, dim=64):
    """Residual GAN generator: fc stem, four up-sampling stages, RGB output."""
    super(Generator, self).__init__()
    self.dim = dim
    self.noise = noise
    with self.init_scope():
        self.fc = L.Linear(None, dim * 8 * 4 * 4)
        # Alternating plain residual stacks and single up-sampling blocks;
        # channel width halves at each 'up' step: 8d -> 4d -> 2d -> d -> d/2.
        self.res1 = Block(6, 8 * dim, 8 * dim, norm=L.BatchNormalization)
        self.res1up = Block(1, 8 * dim, 4 * dim, resample='up', norm=L.BatchNormalization)
        self.res2 = Block(6, 4 * dim, 4 * dim, norm=L.BatchNormalization)
        self.res2up = Block(1, 4 * dim, 2 * dim, resample='up', norm=L.BatchNormalization)
        self.res3 = Block(6, 2 * dim, 2 * dim, norm=L.BatchNormalization)
        self.res3up = Block(1, 2 * dim, 1 * dim, resample='up', norm=L.BatchNormalization)
        self.res4 = Block(6, 1 * dim, 1 * dim, norm=L.BatchNormalization)
        self.res4up = Block(1, 1 * dim, dim // 2, resample='up', norm=L.BatchNormalization)
        self.res5 = Block(5, dim // 2, dim // 2, norm=L.BatchNormalization)
        self.conv = L.Deconvolution2D(None, 3, 3, pad=1)
def __init__(self, channels_chrz, channels_u):
    """Convolutional LSTM gates plus mean/upsampling heads.

    The four LSTM gates and the latent-mean head share one 5x5 conv shape.
    """
    def gate_conv():
        # All gate convolutions preserve spatial size (ksize=5, pad=2).
        return L.Convolution2D(None, channels_chrz, ksize=5, stride=1, pad=2)

    super().__init__(
        lstm_tanh=gate_conv(),
        lstm_i=gate_conv(),
        lstm_f=gate_conv(),
        lstm_o=gate_conv(),
        mean_z=gate_conv(),
        mean_x=L.Convolution2D(None, 3, ksize=1, stride=1, pad=0),
        deconv_h=L.Deconvolution2D(None, channels_u, ksize=4, stride=4, pad=0))
def setUp(self):
    """Create a lazily-initialized deconv link (in_channels=None) and test arrays."""
    n_out = 2
    kernel = 3
    stride = 2
    pad = 1
    self.link = L.Deconvolution2D(
        None, n_out, kernel, stride=stride, pad=pad, nobias=self.nobias)
    if not self.nobias:
        # Randomize the bias so gradient checks are non-trivial.
        self.link.b.data[...] = numpy.random.uniform(
            -1, 1, self.link.b.data.shape).astype(numpy.float32)
    batch = 2
    in_h, in_w = 3, 2
    kh, kw = _pair(kernel)
    out_h = conv.get_deconv_outsize(in_h, kh, stride, pad)
    out_w = conv.get_deconv_outsize(in_w, kw, stride, pad)
    self.gy = numpy.random.uniform(
        -1, 1, (batch, n_out, out_h, out_w)).astype(numpy.float32)
    self.x = numpy.random.uniform(
        -1, 1, (batch, 3, in_h, in_w)).astype(numpy.float32)
    # Forward once so W is created from the inferred in_channels, then reset grads.
    self.link(chainer.Variable(self.x))
    self.link.cleargrads()
def __init__(self, latent_size, wscale=0.02):
    """DCGAN generator: latent -> 4x4x128 fc -> three stride-2 deconvs -> 1 channel."""
    super(Generator, self).__init__()
    self.latent_size = latent_size
    init_w = chainer.initializers.Normal(wscale)
    with self.init_scope():
        self.fc1 = BatchLinear(latent_size, 4 * 4 * 128, initialW=init_w)
        # Each deconvolution doubles the spatial resolution.
        self.deconv2 = BatchDeconv2D(128, 64, 4, pad=1, stride=2, initialW=init_w)
        self.deconv3 = BatchDeconv2D(64, 32, 4, pad=1, stride=2, initialW=init_w)
        self.deconv4 = L.Deconvolution2D(32, 1, 4, pad=1, stride=2, initialW=init_w)
def __init__(self, ch0, ch1, bn=True, sample='down', activation=F.leaky_relu,
             add_noise=False, sigma=0.2):
    """Conv/BatchNorm/activation unit with optional Gaussian noise injection.

    *sample* selects the conv: 'down' (stride-2 conv), 'up' (stride-2
    deconv) or 'same' (stride-1, padding-preserving conv).
    """
    self.bn = bn
    self.activation = activation
    self.add_noise = add_noise
    self.sigma = sigma
    self.iteration = 0
    init_w = chainer.initializers.Normal(0.02)
    links = {}
    if sample == 'down':
        links['c'] = L.Convolution2D(ch0, ch1, 4, 2, 1, initialW=init_w)
    elif sample == 'up':
        links['c'] = L.Deconvolution2D(ch0, ch1, 4, 2, 1, initialW=init_w)
    elif sample == 'same':
        links['c'] = L.Convolution2D(ch0, ch1, 3, 1, 1, initialW=init_w)
    # NOTE(review): any other *sample* value registers no 'c' link and only
    # fails later at call time — confirm whether an early error is wanted.
    if bn:
        links['batchnorm'] = L.BatchNormalization(ch1)
    super(CBR, self).__init__(**links)
def __init__(self, n_units, dim_z, directory=None):
    """Build the TD-VAE links.

    Args:
        n_units: hidden size of the LSTM belief state and of the *_lin layers.
        dim_z: latent dimensionality (all *_m mean heads and *_v variance
            heads map to this size).
        directory: optional output directory stored on the instance.

    NOTE(review): the p_b_*/q_I_*/p_p_* triples look like the belief,
    inference and prediction distributions of a TD-VAE — confirm against
    the forward pass, which is not visible here.
    """
    super(TdVae, self).__init__(
        # Encoder: single conv, flatten, linear, then a recurrent belief state.
        conv1=L.Convolution2D(1, 20, 9),
        lin1=L.Linear(20 * 20 * 20, 400),
        encoder=L.LSTM(400, out_size=n_units),
        bn=chainer.links.BatchNormalization(n_units),
        # Distribution heads: each has a hidden linear plus mean/variance outputs.
        p_b_2_lin=L.Linear(n_units),
        p_b_2_m=L.Linear(dim_z),
        p_b_2_v=L.Linear(dim_z),
        q_I_lin=L.Linear(n_units),
        q_I_m=L.Linear(dim_z),
        q_I_v=L.Linear(dim_z),
        p_b_1_lin=L.Linear(n_units),
        p_b_1_m=L.Linear(dim_z),
        p_b_1_v=L.Linear(dim_z),
        p_p_2_lin=L.Linear(n_units),
        p_p_2_m=L.Linear(dim_z),
        p_p_2_v=L.Linear(dim_z),
        # Decoder: mirrors the encoder back to a 28x28 (784) image.
        p_d_1=L.Linear(20 * 20 * 20),
        p_d_2=L.Linear(784),
        p_d_conv=L.Deconvolution2D(20, 1, 9),
    )
    self.it = 0  # iteration counter
    self.directory = directory
def setUp(self):
    """Build the deconv link from the test-case attributes plus random arrays."""
    self.link = L.Deconvolution2D(
        self.in_channels, self.out_channels, self.ksize,
        stride=self.stride, pad=self.pad, nobias=self.nobias)
    # Randomize all parameters so gradient checks are non-trivial.
    self.link.W.data[...] = numpy.random.uniform(
        -1, 1, self.link.W.data.shape).astype(numpy.float32)
    if not self.nobias:
        self.link.b.data[...] = numpy.random.uniform(
            -1, 1, self.link.b.data.shape).astype(numpy.float32)
    self.link.zerograds()
    batch = 2
    in_h, in_w = 3, 2
    kh, kw = _pair(self.ksize)
    out_h = conv.get_deconv_outsize(in_h, kh, self.stride, self.pad)
    out_w = conv.get_deconv_outsize(in_w, kw, self.stride, self.pad)
    self.gy = numpy.random.uniform(
        -1, 1, (batch, self.out_channels, out_h, out_w)).astype(numpy.float32)
    self.x = numpy.random.uniform(
        -1, 1, (batch, self.in_channels, in_h, in_w)).astype(numpy.float32)
def __init__(self, activation=F.relu):
    """DispNet-style encoder-decoder producing single-channel disparity maps.

    Args:
        activation: activation function applied between layers (applied in
            the forward pass, which is not visible here).
    """
    super(DispNet, self).__init__()
    self.activation = activation
    with self.init_scope():
        # Encoder: seven stride-2 downsampling convs, each followed by a
        # stride-1 refinement conv (the "b" suffix).
        self.c1 = L.Convolution2D(None, 32, ksize=7, stride=2, pad=3)
        self.c1b = L.Convolution2D(None, 32, ksize=7, stride=1, pad=3)
        self.c2 = L.Convolution2D(None, 64, ksize=5, stride=2, pad=2)
        self.c2b = L.Convolution2D(None, 64, ksize=5, stride=1, pad=2)
        self.c3 = L.Convolution2D(None, 128, ksize=3, stride=2, pad=1)
        self.c3b = L.Convolution2D(None, 128, ksize=3, stride=1, pad=1)
        self.c4 = L.Convolution2D(None, 256, ksize=3, stride=2, pad=1)
        self.c4b = L.Convolution2D(None, 256, ksize=3, stride=1, pad=1)
        self.c5 = L.Convolution2D(None, 512, ksize=3, stride=2, pad=1)
        self.c5b = L.Convolution2D(None, 512, ksize=3, stride=1, pad=1)
        self.c6 = L.Convolution2D(None, 512, ksize=3, stride=2, pad=1)
        self.c6b = L.Convolution2D(None, 512, ksize=3, stride=1, pad=1)
        self.c7 = L.Convolution2D(None, 512, ksize=3, stride=2, pad=1)
        self.c7b = L.Convolution2D(None, 512, ksize=3, stride=1, pad=1)
        # Decoder: stride-2 deconvs ("dc") paired with refinement convs
        # ("idc"); "dispout" heads emit 1-channel disparity at several scales.
        self.dc7 = L.Deconvolution2D(None, 512, ksize=4, stride=2, pad=1)
        self.idc7 = L.Convolution2D(None, 512, ksize=3, stride=1, pad=1)
        self.dc6 = L.Deconvolution2D(None, 512, ksize=4, stride=2, pad=1)
        self.idc6 = L.Convolution2D(None, 512, ksize=3, stride=1, pad=1)
        self.dc5 = L.Deconvolution2D(None, 256, ksize=4, stride=2, pad=1)
        self.idc5 = L.Convolution2D(None, 256, ksize=3, stride=1, pad=1)
        self.dc4 = L.Deconvolution2D(None, 128, ksize=4, stride=2, pad=1)
        self.idc4 = L.Convolution2D(None, 128, ksize=3, stride=1, pad=1)
        self.dispout4 = L.Convolution2D(None, 1, ksize=3, stride=1, pad=1)
        self.dc3 = L.Deconvolution2D(None, 64, ksize=4, stride=2, pad=1)
        self.idc3 = L.Convolution2D(None, 64, ksize=3, stride=1, pad=1)
        self.dispout3 = L.Convolution2D(None, 1, ksize=3, stride=1, pad=1)
        self.dc2 = L.Deconvolution2D(None, 32, ksize=4, stride=2, pad=1)
        self.idc2 = L.Convolution2D(None, 32, ksize=3, stride=1, pad=1)
        self.dispout2 = L.Convolution2D(None, 1, ksize=3, stride=1, pad=1)
        self.dc1 = L.Deconvolution2D(None, 16, ksize=4, stride=2, pad=1)
        self.idc1 = L.Convolution2D(None, 16, ksize=3, stride=1, pad=1)
        self.dispout1 = L.Convolution2D(None, 1, ksize=3, stride=1, pad=1)
def __init__(self):
    """Conditional DCGAN-style generator.

    NOTE(review): ``nz``, ``n_signal`` and ``args`` are module-level
    globals — confirm they are defined before construction. The ``wscale``
    keyword is the Chainer v1 weight-scale initializer API; it was removed
    in Chainer v2+, so this code requires Chainer v1.
    """
    super(Generator, self).__init__(
        # Two input heads: one for the noise vector, one for the
        # conditioning signal; their outputs share the 512-channel space.
        dc0z=L.Deconvolution2D(nz, 512, args.final_filter_size,
                               stride=args.stride,
                               wscale=0.02 * math.sqrt(nz)),
        dc0s=L.Deconvolution2D(n_signal, 512, args.final_filter_size,
                               stride=args.stride,
                               wscale=0.02 * math.sqrt(n_signal)),
        # Each subsequent deconvolution doubles H/W and halves the channels.
        dc1=L.Deconvolution2D(512, 256, 4, stride=2, pad=1,
                              wscale=0.02 * math.sqrt(4 * 4 * 512)),
        dc2=L.Deconvolution2D(256, 128, 4, stride=2, pad=1,
                              wscale=0.02 * math.sqrt(4 * 4 * 256)),
        dc3=L.Deconvolution2D(128, 64, 4, stride=2, pad=1,
                              wscale=0.02 * math.sqrt(4 * 4 * 128)),
        dc4=L.Deconvolution2D(64, 1, 4, stride=2, pad=1,
                              wscale=0.02 * math.sqrt(4 * 4 * 64)),
        bn0=L.BatchNormalization(512),
        bn1=L.BatchNormalization(256),
        bn2=L.BatchNormalization(128),
        bn3=L.BatchNormalization(64),
    )
def __init__(self):
    """Convolutional autoencoder: six 32-channel Conv-BN-ReLU encoder layers
    mirrored by six plain deconvolutions, ending in a 1-channel output."""
    super(CAE, self).__init__()
    with self.init_scope():
        # Encoder stages (all identical 3x3, 32-channel Conv2DBNActiv).
        for name in ('conv1_1', 'conv1_2', 'conv2_1',
                     'conv2_2', 'conv3_1', 'conv3_2'):
            setattr(self, name,
                    chainercv.links.Conv2DBNActiv(None, 32, 3, stride=1, pad=1))
        # Decoder stages: 3x3 deconvolutions; the last maps back to 1 channel.
        for name in ('deconv1_1', 'deconv1_2', 'deconv2_1',
                     'deconv2_2', 'deconv3_1'):
            setattr(self, name, L.Deconvolution2D(None, 32, 3, stride=1, pad=1))
        self.deconv3_2 = L.Deconvolution2D(None, 1, 3, stride=1, pad=1)
def __init__(self, act=F.relu):
    """MNIST-scale decoder: 10-d code -> linears -> deconv stack -> 28x28 image.

    Args:
        act: activation applied between layers (used in the forward pass).

    NOTE(review): the ``use_cudnn`` keyword on BatchNormalization is the
    Chainer v1 API (removed in v2+) — this code requires Chainer v1.
    """
    super(Decoder, self).__init__(
        linear0=L.Linear(10, 32),
        linear1=L.Linear(32, 64 * 7 * 7),
        # deconv0/deconv3 (ksize=2, stride=2) double H/W: 7 -> 14 -> 28;
        # the stride-1 deconvs refine at constant resolution.
        deconv0=L.Deconvolution2D(64, 64, 2, stride=2, pad=0),
        deconv1=L.Deconvolution2D(64, 64, 3, stride=1, pad=1),
        deconv2=L.Deconvolution2D(64, 64, 3, stride=1, pad=1),
        deconv3=L.Deconvolution2D(64, 32, 2, stride=2, pad=0),
        deconv4=L.Deconvolution2D(32, 32, 3, stride=1, pad=1),
        deconv5=L.Deconvolution2D(32, 1, 3, stride=1, pad=1),
        bn_linear0=L.BatchNormalization(32, decay=0.9, use_cudnn=True),
        bn_linear1=L.BatchNormalization(64 * 7 * 7, decay=0.9, use_cudnn=True),
        bn_deconv0=L.BatchNormalization(64, decay=0.9, use_cudnn=True),
        bn_deconv1=L.BatchNormalization(64, decay=0.9, use_cudnn=True),
        bn_deconv2=L.BatchNormalization(64, decay=0.9, use_cudnn=True),
        bn_deconv3=L.BatchNormalization(32, decay=0.9, use_cudnn=True),
        bn_deconv4=L.BatchNormalization(32, decay=0.9, use_cudnn=True),
    )
    self.act = act
    self.hiddens = []  # populated during the forward pass
def __init__(self, feature_map_nc, output_nc, tanh_constant,
             instance_normalization=True, w_init=None):
    """Johnson-style image transformation network.

    Architecture: 9x9 stem conv, two stride-2 downsampling convs, five
    residual blocks, two stride-2 deconvolutions, and a 9x9 output conv.

    Args:
        feature_map_nc: base channel count; doubled/quadrupled when
            downsampling.
        output_nc: number of output image channels.
        tanh_constant: scale stored for the output tanh (used in forward).
        instance_normalization: choose InstanceNormalization over
            BatchNormalization for the b1..b5 layers.
        w_init: weight initializer shared by all conv/deconv layers.
    """
    self.tanh_constant = tanh_constant
    BN = InstanceNormalization if instance_normalization else L.BatchNormalization
    super(ImageTransformer, self).__init__(
        c1=L.Convolution2D(None, feature_map_nc, ksize=9, stride=1, pad=4,
                           initialW=w_init),
        c2=L.Convolution2D(None, 2 * feature_map_nc, ksize=3, stride=2, pad=1,
                           initialW=w_init),
        c3=L.Convolution2D(None, 4 * feature_map_nc, ksize=3, stride=2, pad=1,
                           initialW=w_init),
        r1=ResidualBlock(4 * feature_map_nc,
                         instance_normalization=instance_normalization,
                         w_init=w_init),
        r2=ResidualBlock(4 * feature_map_nc,
                         instance_normalization=instance_normalization,
                         w_init=w_init),
        r3=ResidualBlock(4 * feature_map_nc,
                         instance_normalization=instance_normalization,
                         w_init=w_init),
        r4=ResidualBlock(4 * feature_map_nc,
                         instance_normalization=instance_normalization,
                         w_init=w_init),
        r5=ResidualBlock(4 * feature_map_nc,
                         instance_normalization=instance_normalization,
                         w_init=w_init),
        d1=L.Deconvolution2D(None, 2 * feature_map_nc, ksize=4, stride=2,
                             pad=1, initialW=w_init),
        d2=L.Deconvolution2D(None, feature_map_nc, ksize=4, stride=2, pad=1,
                             initialW=w_init),
        d3=L.Convolution2D(None, output_nc, ksize=9, stride=1, pad=4,
                           initialW=w_init),
        b1=BN(feature_map_nc),
        b2=BN(2 * feature_map_nc),
        b3=BN(4 * feature_map_nc),
        b4=BN(2 * feature_map_nc),
        b5=BN(feature_map_nc))
def __init__(self, imap, omap, k=4, s=2, p=1, act=F.relu):
    """Deconvolution followed by batch normalization; *act* is applied in forward.

    NOTE(review): ``use_cudnn`` on BatchNormalization is Chainer v1 API.
    """
    super(DeconvUnit, self).__init__(
        deconv=L.Deconvolution2D(imap, omap, ksize=k, stride=s, pad=p, ),
        bn=L.BatchNormalization(omap, decay=0.9, use_cudnn=True),
    )
    self.act = act
def grouped_deconv():
    """Graph fixture: 4-group transposed convolution applied to global ``x``."""
    link = L.Deconvolution2D(16, 32, ksize=3, pad=1, groups=4)
    out = link(x)
    return {'input': x}, {'out': out}
def __init__(self, n_in, n_out, ksize, stride, pad):
    """Transposed convolution paired with batch normalization."""
    super(Deconv, self).__init__()
    with self.init_scope():
        # Positional args: in_channels, out_channels, ksize, stride, pad.
        self.conv = L.Deconvolution2D(n_in, n_out, ksize, stride, pad)
        self.bn = L.BatchNormalization(n_out)
def deconvolution():
    """Graph fixture: plain transposed convolution applied to global ``x``."""
    link = L.Deconvolution2D(16, 32, ksize=3, pad=1)
    out = link(x)
    return {'input': x}, {'out': out}
def __init__(self): super(SSDNet, self).__init__( conv1_1=L.Convolution2D(3, 64, ksize=3, stride=1, pad=1), conv1_2=L.Convolution2D(64, 64, ksize=3, stride=1, pad=1), conv2_1=L.Convolution2D(64, 128, ksize=3, stride=1, pad=1), conv2_2=L.Convolution2D(128, 128, ksize=3, stride=1, pad=1), conv3_1=L.Convolution2D(128, 256, ksize=3, stride=1, pad=1), conv3_2=L.Convolution2D(256, 256, ksize=3, stride=1, pad=1), conv3_3=L.Convolution2D(256, 256, ksize=3, stride=1, pad=1), conv4_1=L.Convolution2D(256, 512, ksize=3, stride=1, pad=1), conv4_2=L.Convolution2D(512, 512, ksize=3, stride=1, pad=1), conv4_3=L.Convolution2D(512, 512, ksize=3, stride=1, pad=1), conv5_1=L.Convolution2D(512, 512, ksize=3, stride=1, pad=1), conv5_2=L.Convolution2D(512, 512, ksize=3, stride=1, pad=1), conv5_3=L.Convolution2D(512, 512, ksize=3, stride=1, pad=1), fc6=L.Convolution2D(512, 1024, ksize=3, stride=1, pad=1), fc7=L.Convolution2D(1024, 1024, ksize=1, stride=1, pad=0), conv6_1=L.Convolution2D(1024, 256, ksize=1, stride=1, pad=0), conv6_2=L.Convolution2D(256, 512, ksize=3, stride=2, pad=1), bn_conv6=L.BatchNormalization(512), conv7_1=L.Convolution2D(512, 128, ksize=1, stride=1, pad=0), conv7_2=L.Convolution2D(128, 256, ksize=3, stride=2, pad=1), conv8_1=L.Convolution2D(256, 128, ksize=1, stride=1, pad=0), conv8_2=L.Convolution2D(128, 256, ksize=3, stride=1, pad=0), conv9_1=L.Convolution2D(256, 128, ksize=1, stride=1, pad=0), conv9_2=L.Convolution2D(128, 256, ksize=3, stride=1, pad=0), conv9_2_mbox_loc=L.Convolution2D(256, common_params.num_of_offset_dims * common_params.num_boxes[5], ksize=3, stride=1, pad=1), conv9_2_mbox_cls=L.Convolution2D(256, common_params.num_of_classes * common_params.num_boxes[5], ksize=3, stride=1, pad=1), deconv9_2_1=L.Deconvolution2D(256, common_params.num_of_classes, ksize=10, stride=1, pad=0), #10×10 deconv9_2_2=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=11, stride=8, pad=4), #75×75 
deconv9_2_3=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=6, stride=4, pad=1), #300×300 deconv1=L.Deconvolution2D(256, 256, ksize=3, stride=1, pad=0), #3×3 conv8_2_mbox_loc=L.Convolution2D(256, common_params.num_of_offset_dims * common_params.num_boxes[4], ksize=3, stride=1, pad=1), conv8_2_mbox_cls=L.Convolution2D(256, common_params.num_of_classes * common_params.num_boxes[4], ksize=3, stride=1, pad=1), deconv8_2_1=L.Deconvolution2D(256, common_params.num_of_classes, ksize=9, stride=6, pad=1), #19×19 deconv8_2_2=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=7, stride=4, pad=2), #75×75 deconv8_2_3=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=6, stride=4, pad=1), #300×300 deconv2=L.Deconvolution2D(256, 256, ksize=3, stride=2, pad=1), #5×5 conv7_2_mbox_loc=L.Convolution2D(256, common_params.num_of_offset_dims * common_params.num_boxes[3], ksize=3, stride=1, pad=1), conv7_2_mbox_cls=L.Convolution2D(256, common_params.num_of_classes * common_params.num_boxes[3], ksize=3, stride=1, pad=1), deconv7_2_1=L.Deconvolution2D(256, common_params.num_of_classes, ksize=7, stride=4, pad=2), #19×19 deconv7_2_2=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=7, stride=4, pad=2), #75×75 deconv7_2_3=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=6, stride=4, pad=1), #300×300 deconv3=L.Deconvolution2D(256, 512, ksize=4, stride=2, pad=1), #10×10 conv6_2_mbox_loc=L.Convolution2D(512, common_params.num_of_offset_dims * common_params.num_boxes[2], ksize=3, stride=1, pad=1), conv6_2_mbox_cls=L.Convolution2D(512, common_params.num_of_classes * common_params.num_boxes[2], ksize=3, stride=1, pad=1), deconv6_2_1=L.Deconvolution2D(512, common_params.num_of_classes, ksize=6, stride=4, pad=2), #38×38 deconv6_2_2=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=3, stride=2, 
pad=1), #75×75 deconv6_2_3=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=6, stride=4, pad=1), #300×300 deconv4=L.Deconvolution2D(512, 1024, ksize=3, stride=2, pad=1), #19×19 fc7_mbox_loc=L.Convolution2D(1024, common_params.num_of_offset_dims * common_params.num_boxes[1], ksize=3, stride=1, pad=1), fc7_mbox_cls=L.Convolution2D(1024, common_params.num_of_classes * common_params.num_boxes[1], ksize=3, stride=1, pad=1), deconvfc7_1=L.Deconvolution2D(1024, common_params.num_of_classes, ksize=4, stride=2, pad=1), #38×38 deconvfc7_2=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=3, stride=2, pad=1), #75×75 deconvfc7_3=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=6, stride=4, pad=1), #300×300 deconv5=L.Deconvolution2D(1024, 512, ksize=4, stride=2, pad=1), #38×38 bn4_3=L.BatchNormalization(512), conv4_3_norm_mbox_loc=L.Convolution2D( 512, common_params.num_of_offset_dims * common_params.num_boxes[0], ksize=3, stride=1, pad=1), conv4_3_norm_mbox_cls=L.Convolution2D( 512, common_params.num_of_classes * common_params.num_boxes[0], ksize=3, stride=1, pad=1), deconv4_3_1=L.Deconvolution2D(512, common_params.num_of_classes, ksize=3, stride=2, pad=1), #75×75 deconv4_3_2=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=4, stride=2, pad=1), #150×150 deconv4_3_3=L.Deconvolution2D(common_params.num_of_classes, common_params.num_of_classes, ksize=4, stride=2, pad=1), #300×300 segconv=L.Convolution2D(common_params.num_of_classes * 6, common_params.num_of_classes, ksize=1, stride=1, pad=0)) self.train = False
def __init__(self, d=256):
    """Two-stage 4x upsampling decoder: d channels -> d -> 1."""
    super(Decoder, self).__init__()
    with self.init_scope():
        # Each (4, 2, 1) transposed conv doubles the spatial resolution.
        self.dconv1 = L.Deconvolution2D(d, d, 4, 2, 1)
        self.dconv2 = L.Deconvolution2D(d, 1, 4, 2, 1)
def __init__(self, train=False):
    """U-Net style encoder-decoder.

    Five conv stages doubling channels (64..1024), then four
    deconv-upsample + conv stages back down, with a final 1x1 conv to
    ``num_classes + 1`` maps (extra channel presumably background —
    TODO confirm).
    """
    self.train = train
    base = 64
    super(Mynet, self).__init__()

    def conv_stage(width):
        # Two (3x3 conv -> ReLU -> BatchNorm) repetitions at *width* channels.
        seq = chainer.Sequential()
        for _ in range(2):
            seq.append(L.Convolution2D(None, width, ksize=3, pad=1,
                                       stride=1, nobias=True))
            seq.append(F.relu)
            seq.append(L.BatchNormalization(width))
        return seq

    def up_stage(width):
        # Stride-2 deconvolution -> ReLU -> BatchNorm (doubles H and W).
        seq = chainer.Sequential()
        seq.append(L.Deconvolution2D(None, width, ksize=2, stride=2))
        seq.append(F.relu)
        seq.append(L.BatchNormalization(width))
        return seq

    with self.init_scope():
        self.enc1 = conv_stage(base)
        self.enc2 = conv_stage(base * 2)
        self.enc3 = conv_stage(base * 4)
        self.enc4 = conv_stage(base * 8)
        self.enc5 = conv_stage(base * 16)
        self.upsample4 = up_stage(base * 8)
        self.dec4 = conv_stage(base * 8)
        self.upsample3 = up_stage(base * 4)
        self.dec3 = conv_stage(base * 4)
        self.upsample2 = up_stage(base * 2)
        self.dec2 = conv_stage(base * 2)
        self.upsample1 = up_stage(base)
        self.dec1 = conv_stage(base)
        self.out = L.Convolution2D(None, num_classes + 1, ksize=1,
                                   pad=0, stride=1, nobias=False)
def __init__(self, l_latent=64, l_seq=32, mode='generator', bn=False,
             activate_func=F.leaky_relu, vertical_ksize=1):
    """Convolutional autoencoder over spectrogram-like inputs.

    Args:
        l_latent: bottleneck dimensionality.
        l_seq: time length; must be divisible by 32 (five stride-2 stages).
        mode: 'generator' builds encoder + decoder; 'discriminator'
            builds the encoder only.
        bn: add per-stage BatchNormalization links when True.
        activate_func: activation used in the forward pass.
        vertical_ksize: kernel extent on the second (vertical) axis;
            padding keeps that axis's size when it is odd.

    Raises:
        ValueError: if ``l_seq`` is not divisible by 32 or ``mode`` is
            not one of 'discriminator'/'generator'.

    NOTE(review): ``enc_l`` flattens 128*34*(l_seq//32) features while
    ``dec_l`` produces 128*17*(l_seq//32) — the 34-vs-17 asymmetry looks
    intentional (different vertical extents) but is worth confirming.
    """
    if l_seq % 32 != 0:
        raise ValueError('\'l_seq\' must be divisible by 32.')
    if not mode in ['discriminator', 'generator']:
        raise ValueError(
            '\'mode\' must be \'discriminator\' or \'generator\'.')
    super(ConvAE, self).__init__()
    self.bn = bn
    self.mode = mode
    self.activate_func = activate_func
    w = chainer.initializers.Normal(0.02)
    with self.init_scope():
        # Encoder: five convs, each halving the time axis (stride (2, 1)).
        self.conv1 = L.Convolution2D(None, 32, ksize=(4, vertical_ksize),
                                     stride=(2, 1),
                                     pad=(1, (vertical_ksize - 1) // 2),
                                     initialW=w)
        self.conv2 = L.Convolution2D(32, 64, ksize=(4, vertical_ksize),
                                     stride=(2, 1),
                                     pad=(1, (vertical_ksize - 1) // 2),
                                     initialW=w)
        self.conv3 = L.Convolution2D(64, 64, ksize=(4, vertical_ksize),
                                     stride=(2, 1),
                                     pad=(1, (vertical_ksize - 1) // 2),
                                     initialW=w)
        self.conv4 = L.Convolution2D(64, 64, ksize=(4, vertical_ksize),
                                     stride=(2, 1),
                                     pad=(1, (vertical_ksize - 1) // 2),
                                     initialW=w)
        self.conv5 = L.Convolution2D(64, 128, ksize=(4, vertical_ksize),
                                     stride=(2, 1),
                                     pad=(1, (vertical_ksize - 1) // 2),
                                     initialW=w)
        self.enc_l = L.Linear(128 * 34 * l_seq // 32, l_latent, initialW=w)
        if self.mode == 'generator':
            # Decoder mirrors the encoder with five deconvolutions.
            self.dec_l = L.Linear(l_latent, 128 * 17 * l_seq // 32,
                                  initialW=w)
            self.deconv1 = L.Deconvolution2D(
                128, 64, ksize=(4, vertical_ksize), stride=(2, 1),
                pad=(1, (vertical_ksize - 1) // 2), initialW=w)
            self.deconv2 = L.Deconvolution2D(
                64, 64, ksize=(4, vertical_ksize), stride=(2, 1),
                pad=(1, (vertical_ksize - 1) // 2), initialW=w)
            self.deconv3 = L.Deconvolution2D(
                64, 64, ksize=(4, vertical_ksize), stride=(2, 1),
                pad=(1, (vertical_ksize - 1) // 2), initialW=w)
            self.deconv4 = L.Deconvolution2D(
                64, 32, ksize=(4, vertical_ksize), stride=(2, 1),
                pad=(1, (vertical_ksize - 1) // 2), initialW=w)
            self.deconv5 = L.Deconvolution2D(
                32, 1, ksize=(4, vertical_ksize), stride=(2, 1),
                pad=(1, (vertical_ksize - 1) // 2), initialW=w)
        if self.bn:
            # One BatchNorm per stage, sized to that stage's output channels.
            self.enc_bn1 = L.BatchNormalization(32)
            self.enc_bn2 = L.BatchNormalization(64)
            self.enc_bn3 = L.BatchNormalization(64)
            self.enc_bn4 = L.BatchNormalization(64)
            self.enc_bn5 = L.BatchNormalization(128)
            if self.mode == 'generator':
                self.dec_bn1 = L.BatchNormalization(128)
                self.dec_bn2 = L.BatchNormalization(64)
                self.dec_bn3 = L.BatchNormalization(64)
                self.dec_bn4 = L.BatchNormalization(64)
                self.dec_bn5 = L.BatchNormalization(32)