def __init__(self, n_classes_fcn, n_classes_yolo, n_boxes):
    super(YOLOv2, self).__init__(
        conv1=L.Convolution2D(3, 64, 3, stride=1, pad=1, nobias=True),
        bn1=L.BatchNormalization(64, use_beta=False, eps=2e-5),
        bias1=L.Bias(shape=(64,)),
        conv2=L.Convolution2D(None, 64, 3, stride=1, pad=1, nobias=True),
        bn2=L.BatchNormalization(64, use_beta=False, eps=2e-5),
        bias2=L.Bias(shape=(64,)),
        conv3=L.Convolution2D(None, 128, 3, stride=1, pad=1, nobias=True),
        bn3=L.BatchNormalization(128, use_beta=False, eps=2e-5),
        bias3=L.Bias(shape=(128,)),
        conv4=L.Convolution2D(None, 128, 3, stride=1, pad=1, nobias=True),
        bn4=L.BatchNormalization(128, use_beta=False, eps=2e-5),
        bias4=L.Bias(shape=(128,)),
        conv5=L.Convolution2D(None, 256, 3, stride=1, pad=1, nobias=True),
        bn5=L.BatchNormalization(256, use_beta=False, eps=2e-5),
        bias5=L.Bias(shape=(256,)),
        conv6=L.Convolution2D(None, 256, 3, stride=1, pad=1, nobias=True),
        bn6=L.BatchNormalization(256, use_beta=False, eps=2e-5),
        bias6=L.Bias(shape=(256,)),
        conv7=L.Convolution2D(None, 256, 3, stride=1, pad=1, nobias=True),
        bn7=L.BatchNormalization(256, use_beta=False, eps=2e-5),
        bias7=L.Bias(shape=(256,)),
        conv8=L.Convolution2D(None, 512, 3, stride=1, pad=1, nobias=True),
        bn8=L.BatchNormalization(512, use_beta=False, eps=2e-5),
        bias8=L.Bias(shape=(512,)),
        conv9=L.Convolution2D(None, 512, 3, stride=1, pad=1, nobias=True),
        bn9=L.BatchNormalization(512, use_beta=False, eps=2e-5),
        bias9=L.Bias(shape=(512,)),
        conv10=L.Convolution2D(None, 512, 3, stride=1, pad=1, nobias=True),
        bn10=L.BatchNormalization(512, use_beta=False, eps=2e-5),
        bias10=L.Bias(shape=(512,)),
        conv11=L.Convolution2D(None, 512, 3, stride=1, pad=1, nobias=True),
        bn11=L.BatchNormalization(512, use_beta=False, eps=2e-5),
        bias11=L.Bias(shape=(512,)),
        conv12=L.Convolution2D(None, 512, 3, stride=1, pad=1, nobias=True),
        bn12=L.BatchNormalization(512, use_beta=False, eps=2e-5),
        bias12=L.Bias(shape=(512,)),
        conv13=L.Convolution2D(None, 512, 3, stride=1, pad=1, nobias=True),
        bn13=L.BatchNormalization(512, use_beta=False, eps=2e-5),
        bias13=L.Bias(shape=(512,)),
        pool1=L.Convolution2D(None, n_classes_fcn, 1, stride=1, pad=0),
        pool2=L.Convolution2D(None, n_classes_fcn, 1, stride=1, pad=0),
        pool3=L.Convolution2D(None, n_classes_fcn, 1, stride=1, pad=0),
        upsample1=L.Deconvolution2D(None, n_classes_fcn, ksize=4, stride=2, pad=1),
        upsample2=L.Deconvolution2D(None, n_classes_fcn, ksize=8, stride=4, pad=2),
        upsample3=L.Deconvolution2D(None, n_classes_fcn, ksize=16, stride=8, pad=4),
        conv14=L.Convolution2D(None, 1024, 3, stride=1, pad=1, nobias=True),
        bn14=L.BatchNormalization(1024, use_beta=False, eps=2e-5),
        bias14=L.Bias(shape=(1024,)),
        conv15=L.Convolution2D(None, n_boxes * (5 + n_classes_yolo),
                               ksize=1, stride=1, pad=0),
    )
    self.n_boxes = n_boxes
    self.n_classes_fcn = n_classes_fcn
    self.n_classes_yolo = n_classes_yolo
    self.finetune = False
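# Hedged sketch (not from the original model): how the YOLO head emitted by
# conv15 above is conventionally unpacked. Each grid cell carries
# n_boxes * (5 + n_classes_yolo) channels per anchor: box offsets (x, y, w, h),
# an objectness score, and per-class scores. The channel layout is an
# assumption; it assumes `import chainer.functions as F` as elsewhere here.
def split_yolo_head(h, n_boxes, n_classes_yolo):
    batch, _, gh, gw = h.shape
    h = F.reshape(h, (batch, n_boxes, 5 + n_classes_yolo, gh, gw))
    xywh = h[:, :, :4]   # box parameters, usually squashed with sigmoid/exp
    conf = h[:, :, 4]    # objectness score
    prob = h[:, :, 5:]   # class scores
    return xywh, conf, prob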
def __init__(self, dim_in=512, scaling=1.0):
    super(ConvolutionShapeDecoder, self).__init__()
    self.grid_size = 16
    self.obj_scale = 0.5
    self.scaling = scaling

    # create base shape & faces and transforming matrix
    self.vertices_base = None
    self.vertices_matrix = None
    self.num_vertices = None
    self.faces = None
    self.init_vertices_base()
    self.init_faces()
    self.laplacian = get_graph_laplacian(self.faces, self.num_vertices)
    self.normalize_vertices_base()

    # init NN layers
    with self.init_scope():
        dh = [512, 256, 128, 64, 3]
        init = chainer.initializers.HeNormal()
        layers = {}
        for i in range(6):
            layers['linear_p%d_in' % i] = cl.Linear(
                dim_in, dh[0] * 4, initialW=init, nobias=True)
            layers['conv_p%d_1_1_1' % i] = cl.Deconvolution2D(
                dh[0], dh[1], 3, 2, 1, outsize=(4, 4), initialW=init, nobias=True)
            layers['conv_p%d_1_1_2' % i] = cl.Convolution2D(
                dh[1], dh[1], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_1_1_3' % i] = cl.Deconvolution2D(
                dh[0], dh[1], 1, 2, 0, outsize=(4, 4), initialW=init, nobias=True)
            layers['conv_p%d_1_2_1' % i] = cl.Convolution2D(
                dh[1], dh[1], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_1_2_2' % i] = cl.Convolution2D(
                dh[1], dh[1], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_2_1_1' % i] = cl.Deconvolution2D(
                dh[1], dh[2], 3, 2, 1, outsize=(8, 8), initialW=init, nobias=True)
            layers['conv_p%d_2_1_2' % i] = cl.Convolution2D(
                dh[2], dh[2], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_2_1_3' % i] = cl.Deconvolution2D(
                dh[1], dh[2], 1, 2, 0, outsize=(8, 8), initialW=init, nobias=True)
            layers['conv_p%d_2_2_1' % i] = cl.Convolution2D(
                dh[2], dh[2], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_2_2_2' % i] = cl.Convolution2D(
                dh[2], dh[2], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_3_1_1' % i] = cl.Deconvolution2D(
                dh[2], dh[3], 3, 2, 1, outsize=(16, 16), initialW=init, nobias=True)
            layers['conv_p%d_3_1_2' % i] = cl.Convolution2D(
                dh[3], dh[3], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_3_1_3' % i] = cl.Deconvolution2D(
                dh[2], dh[3], 1, 2, 0, outsize=(16, 16), initialW=init, nobias=True)
            layers['conv_p%d_3_2_1' % i] = cl.Convolution2D(
                dh[3], dh[3], 3, 1, 1, initialW=init, nobias=True)
            layers['conv_p%d_3_2_2' % i] = cl.Convolution2D(
                dh[3], dh[3], 3, 1, 1, initialW=init, nobias=True)
            layers['linear_p%d_out' % i] = cl.Convolution2D(
                dh[3], dh[4], 1, 1, 0, initialW=init)
            # BatchNormalization sizes match the *input* of the layer each one
            # precedes (pre-activation ordering).
            layers['linear_p%d_in_bn' % i] = cl.BatchNormalization(dh[0])
            layers['conv_p%d_1_1_2_bn' % i] = cl.BatchNormalization(dh[1])
            layers['conv_p%d_1_2_1_bn' % i] = cl.BatchNormalization(dh[1])
            layers['conv_p%d_1_2_2_bn' % i] = cl.BatchNormalization(dh[1])
            layers['conv_p%d_2_1_1_bn' % i] = cl.BatchNormalization(dh[1])
            layers['conv_p%d_2_1_2_bn' % i] = cl.BatchNormalization(dh[2])
            layers['conv_p%d_2_2_1_bn' % i] = cl.BatchNormalization(dh[2])
            layers['conv_p%d_2_2_2_bn' % i] = cl.BatchNormalization(dh[2])
            layers['conv_p%d_3_1_1_bn' % i] = cl.BatchNormalization(dh[2])
            layers['conv_p%d_3_1_2_bn' % i] = cl.BatchNormalization(dh[3])
            layers['conv_p%d_3_2_1_bn' % i] = cl.BatchNormalization(dh[3])
            layers['conv_p%d_3_2_2_bn' % i] = cl.BatchNormalization(dh[3])
            layers['linear_p%d_out_bn' % i] = cl.BatchNormalization(dh[3])
        for k, v in layers.items():
            setattr(self, k, v)
        self.vertices_base = chainer.Parameter(self.vertices_base)
def __init__(self, dim_in=512, scaling=1.0, symmetric=False):
    super(ConvolutionTextureDecoder, self).__init__()
    self.grid_size = 16
    self.texture_size = 64
    self.scaling = scaling
    self.symmetric = symmetric
    self.vertices = None
    self.faces = None
    self.compute_vertices()

    with self.init_scope():
        dim_out = 3
        dh = [512, 256, 128, 64]
        init = chainer.initializers.HeNormal()
        layer_list = {}
        for i in range(6):
            layer_list['linear_p%d_1' % i] = cl.Linear(
                dim_in, dh[0] * 4 * 4, initialW=init, nobias=True)
            layer_list['conv_p%d_1' % i] = cl.Deconvolution2D(
                dh[0], dh[1], 5, 2, 2, outsize=(8, 8), initialW=init, nobias=True)
            layer_list['conv_p%d_2' % i] = cl.Deconvolution2D(
                dh[1], dh[2], 5, 2, 2, outsize=(16, 16), initialW=init, nobias=True)
            layer_list['conv_p%d_3' % i] = cl.Deconvolution2D(
                dh[2], dh[3], 5, 2, 2, outsize=(32, 32), initialW=init, nobias=True)
            layer_list['conv_p%d_4' % i] = cl.Deconvolution2D(
                dh[3], dim_out, 5, 2, 2, outsize=(64, 64), initialW=init)
            layer_list['linear_p%d_1_bn' % i] = cl.BatchNormalization(dh[0])
            layer_list['conv_p%d_1_bn' % i] = cl.BatchNormalization(dh[1])
            layer_list['conv_p%d_2_bn' % i] = cl.BatchNormalization(dh[2])
            layer_list['conv_p%d_3_bn' % i] = cl.BatchNormalization(dh[3])
        for k, v in layer_list.items():
            setattr(self, k, v)
        self.texture_base = chainer.Parameter(
            chainer.initializers.Constant(0),
            (3, self.texture_size, 6 * self.texture_size))
def __init__(self, model_params):
    super(DNN, self).__init__(
        l1=L.Linear(model_params['fp_length'], model_params['h1_size']),
        l2=L.Linear(model_params['h1_size'], 1),
        bnorm1=L.BatchNormalization(model_params['h1_size']),
    )
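# Hedged forward-pass sketch for the DNN above (the activation and ordering
# are assumptions; the original class only defines the links):
def dnn_forward(model, x):
    h = F.relu(model.bnorm1(model.l1(x)))  # fingerprint -> hidden -> BN -> ReLU
    return model.l2(h)                     # single scalar output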
def __init__(
        self,
        img_width=64,
        img_height=64,
        color_channels=3,
        encode_layers=[1000, 600, 300],
        latent_width=100,
        mode='convolution',
):
    self.img_width = img_width
    self.img_height = img_height
    self.color_channels = color_channels
    self.encode_layers = encode_layers
    self.latent_width = latent_width
    self.mode = mode
    self.img_len = self.img_width * self.img_height * self.color_channels

    self._layers = {}
    if self.mode == 'convolution':
        # The original code used the Chainer v1 `wscale` argument, which was
        # later removed; wscale = 0.02 * sqrt(fan_in) under the v1 default
        # initializer is equivalent to a plain Normal(0.02).
        w = chainer.initializers.Normal(0.02)
        self._layers['conv1'] = L.Convolution2D(self.color_channels, 32, 4,
                                                stride=2, pad=1, initialW=w)
        self._layers['conv2'] = L.Convolution2D(32, 64, 4, stride=2, pad=1,
                                                initialW=w)
        self._layers['conv3'] = L.Convolution2D(64, 128, 4, stride=2, pad=1,
                                                initialW=w)
        self._layers['conv4'] = L.Convolution2D(128, 256, 4, stride=2, pad=1,
                                                initialW=w)
        self._layers['conv5'] = L.Convolution2D(256, 512, 4, stride=2, pad=1,
                                                initialW=w)
        self._layers['bn1'] = L.BatchNormalization(32)
        self._layers['bn2'] = L.BatchNormalization(64)
        self._layers['bn3'] = L.BatchNormalization(128)
        self._layers['bn4'] = L.BatchNormalization(256)
        self._layers['bn5'] = L.BatchNormalization(512)
        self._layers['bn6'] = L.BatchNormalization(self.latent_width * 2)
        # `reduce` must be imported from functools on Python 3.
        self.img_len = reduce(
            lambda x, y: x * y,
            calc_fc_size(self.img_height, self.img_width))
        self.img_width, self.img_height = calc_fc_size(
            self.img_height, self.img_width)[1:]
        self.img_width, self.img_height = calc_im_size(
            self.img_height, self.img_width)
        self._layers['lin'] = L.Linear(self.img_len, 2 * self.latent_width)
    elif self.mode == 'linear':
        # Encoding steps
        encode_layer_pairs = []
        if len(self.encode_layers) > 0:
            encode_layer_pairs = [(self.img_len, self.encode_layers[0])]
        if len(self.encode_layers) > 1:
            encode_layer_pairs += zip(self.encode_layers[:-1],
                                      self.encode_layers[1:])
        if self.encode_layers:
            encode_layer_pairs += [(self.encode_layers[-1],
                                    self.latent_width * 2)]
        else:
            encode_layer_pairs += [(self.img_len, self.latent_width * 2)]
        for i, (n_in, n_out) in enumerate(encode_layer_pairs):
            self._layers['linear_%i' % i] = L.Linear(n_in, n_out)
    else:
        raise NameError(
            "Improper mode type %s. Encoder mode must be 'linear' or "
            "'convolution'." % self.mode)
    super(Encoder, self).__init__(**self._layers)
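# Hedged usage sketch: the encoder's final layer emits 2 * latent_width
# values, conventionally split into the mean and log-variance of a diagonal
# Gaussian (VAE-style). The split and the names here are assumptions, not
# part of the original class.
def split_gaussian(out, latent_width):
    mu = out[:, :latent_width]
    ln_var = out[:, latent_width:]
    return mu, ln_var  # e.g. z = F.gaussian(mu, ln_var) to reparameterize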
def __init__(self, n_out):
    super(Block, self).__init__()
    with self.init_scope():
        self.li = L.Linear(None, n_out)
        self.bn = L.BatchNormalization(n_out)
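# Hedged forward for Block (the ReLU is an assumption; only the links are
# defined above):
def block_forward(block, x):
    return F.relu(block.bn(block.li(x)))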
def __init__(self):
    super(ResidualBlock, self).__init__(
        L.Convolution2D(128, 128, 3, pad=1),
        L.BatchNormalization(128),
        L.Convolution2D(128, 128, 3, pad=1),
        L.BatchNormalization(128))
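# Hedged forward sketch, assuming ResidualBlock is a ChainList whose children
# are (conv, bn, conv, bn) in the order registered above:
def residual_forward(block, x):
    conv1, bn1, conv2, bn2 = block  # ChainList supports iteration/unpacking
    h = F.relu(bn1(conv1(x)))
    return x + bn2(conv2(h))        # identity skip; 128 channels on both sides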
def __init__(self, in_channels, out_channels):
    super(DenseBlock, self).__init__()
    with self.init_scope():
        self.fc = L.Linear(in_size=in_channels, out_size=out_channels)
        self.bn = L.BatchNormalization(size=out_channels, eps=1e-5)
        self.activ = F.relu
def __init__(self, out_ch):
    super(BG, self).__init__()
    with self.init_scope():
        self.bn0 = L.BatchNormalization(out_ch)
def __init__(self):
    super(DenseASPP, self).__init__(
        # First Convolution
        convF1=L.Convolution2D(3, BASE_CHANNEL, ksize=3, stride=1, pad=1,
                               initialW=iniW),  # 128x128 to 128x128
        convF2=L.Convolution2D(BASE_CHANNEL, BASE_CHANNEL, ksize=3, stride=1,
                               pad=1, initialW=iniW),  # 128x128 to 128x128
        convF3=L.Convolution2D(BASE_CHANNEL, BASE_CHANNEL, ksize=3, stride=1,
                               pad=1, initialW=iniW),
        conv1x1_D3=L.Convolution2D(BASE_CHANNEL, BASE_CHANNEL // 2, ksize=1,
                                   stride=1, pad=0, initialW=iniW),  # 128x128 to 128x128
        dilate_conv3=L.DilatedConvolution2D(
            BASE_CHANNEL // 2, BASE_CHANNEL // 8, ksize=3, stride=1, pad=2,
            dilate=2, nobias=False, initialW=None, initial_bias=None),
        conv1x1_D6=L.Convolution2D(BASE_CHANNEL * 9 // 8, BASE_CHANNEL // 2,
                                   ksize=1, stride=1, pad=0, initialW=iniW),
        dilate_conv6=L.DilatedConvolution2D(
            BASE_CHANNEL // 2, BASE_CHANNEL // 8, ksize=3, stride=1, pad=5,
            dilate=5, nobias=False, initialW=None, initial_bias=None),
        conv1x1_D12=L.Convolution2D(BASE_CHANNEL * 10 // 8, BASE_CHANNEL // 2,
                                    ksize=1, stride=1, pad=0, initialW=iniW),
        dilate_conv12=L.DilatedConvolution2D(
            BASE_CHANNEL // 2, BASE_CHANNEL // 8, ksize=3, stride=1, pad=11,
            dilate=11, nobias=False, initialW=None, initial_bias=None),
        conv1x1_D18=L.Convolution2D(BASE_CHANNEL * 11 // 8, BASE_CHANNEL // 2,
                                    ksize=1, stride=1, pad=0, initialW=iniW),
        dilate_conv18=L.DilatedConvolution2D(
            BASE_CHANNEL // 2, BASE_CHANNEL // 8, ksize=3, stride=1, pad=17,
            dilate=17, nobias=False, initialW=None, initial_bias=None),
        conv1x1_D24=L.Convolution2D(BASE_CHANNEL * 12 // 8, BASE_CHANNEL // 2,
                                    ksize=1, stride=1, pad=0, initialW=iniW),
        dilate_conv24=L.DilatedConvolution2D(
            BASE_CHANNEL // 2, BASE_CHANNEL // 8, ksize=3, stride=1, pad=23,
            dilate=23, nobias=False, initialW=None, initial_bias=None),
        convL=L.Convolution2D(BASE_CHANNEL * 13 // 8, CLASS_NUM, ksize=3,
                              stride=1, pad=1, initialW=iniW),  # 128x128 to 128x128
        # batch normalization
        bnF1=L.BatchNormalization(3),
        bnF2=L.BatchNormalization(BASE_CHANNEL),
        bnF3=L.BatchNormalization(BASE_CHANNEL),
        bn1x1_D3=L.BatchNormalization(BASE_CHANNEL),
        bnD3=L.BatchNormalization(BASE_CHANNEL // 2),
        bn1x1_D6=L.BatchNormalization(BASE_CHANNEL * 9 // 8),
        bnD6=L.BatchNormalization(BASE_CHANNEL // 2),
        bn1x1_D12=L.BatchNormalization(BASE_CHANNEL * 10 // 8),
        bnD12=L.BatchNormalization(BASE_CHANNEL // 2),
        bn1x1_D18=L.BatchNormalization(BASE_CHANNEL * 11 // 8),
        bnD18=L.BatchNormalization(BASE_CHANNEL // 2),
        bn1x1_D24=L.BatchNormalization(BASE_CHANNEL * 12 // 8),
        bnD24=L.BatchNormalization(BASE_CHANNEL // 2),
        bnL=L.BatchNormalization(BASE_CHANNEL * 13 // 8),
    )
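# Hedged sketch of one dense-ASPP branch implied by the channel counts above:
# each dilated branch reads the concatenation of everything so far
# (BASE_CHANNEL, then BASE_CHANNEL * 9 // 8, * 10 // 8, ...) and contributes
# BASE_CHANNEL // 8 new channels. The pre-activation ordering is an assumption
# inferred from the BatchNormalization sizes, not taken from the source.
def dense_aspp_branch(x, bn_in, conv1x1, bn_mid, dilate_conv):
    h = conv1x1(F.relu(bn_in(x)))        # bottleneck to BASE_CHANNEL // 2
    h = dilate_conv(F.relu(bn_mid(h)))   # dilated 3x3, BASE_CHANNEL // 8 out
    return F.concat((x, h), axis=1)      # dense connection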
def __init__(self, in_ch, out_ch):
    w = chainer.initializers.GlorotUniform()
    super(CBR, self).__init__()
    with self.init_scope():
        self.c0 = L.Convolution2D(in_ch, out_ch, 4, 2, 1, initialW=w)
        self.bn0 = L.BatchNormalization(out_ch)
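# Hedged forward for CBR (the leaky ReLU is an assumption; the ksize=4,
# stride=2, pad=1 convolution halves the spatial resolution, the usual
# DCGAN-style downsampling step):
def cbr_forward(cbr, x):
    return F.leaky_relu(cbr.bn0(cbr.c0(x)))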
def __init__(self, label):
    super(UNet3D, self).__init__()
    with self.init_scope():
        # encoder path
        self.conv1 = L.ConvolutionND(ndim=3, in_channels=4, out_channels=8, ksize=3, pad=0)
        self.bnc0 = L.BatchNormalization(8)
        self.conv2 = L.ConvolutionND(ndim=3, in_channels=8, out_channels=16, ksize=3, pad=0)
        self.bnc1 = L.BatchNormalization(16)
        self.conv3 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=16, ksize=3, pad=0)
        self.bnc2 = L.BatchNormalization(16)
        self.conv4 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=32, ksize=3, pad=0)
        self.bnc3 = L.BatchNormalization(32)
        self.conv5 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=0)
        self.bnc4 = L.BatchNormalization(32)
        self.conv6 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=64, ksize=3, pad=0)
        self.bnc5 = L.BatchNormalization(64)
        # decoder path
        self.dconv1 = L.DeconvolutionND(ndim=3, in_channels=64, out_channels=64, ksize=2, stride=2)
        self.conv7 = L.ConvolutionND(ndim=3, in_channels=32 + 64, out_channels=32, ksize=3, pad=0)
        self.bnd4 = L.BatchNormalization(32)
        self.conv8 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=3, pad=0)
        self.bnd3 = L.BatchNormalization(32)
        self.dconv2 = L.DeconvolutionND(ndim=3, in_channels=32, out_channels=32, ksize=2, stride=2)
        self.conv9 = L.ConvolutionND(ndim=3, in_channels=16 + 32, out_channels=16, ksize=3, pad=0)
        self.bnd2 = L.BatchNormalization(16)
        self.conv10 = L.ConvolutionND(ndim=3, in_channels=16, out_channels=16, ksize=3, pad=0)
        self.bnd1 = L.BatchNormalization(16)
        self.lcl = L.ConvolutionND(ndim=3, in_channels=16, out_channels=label, ksize=1, pad=0)
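# Size bookkeeping: with ksize=3 and pad=0 every ConvolutionND above trims
# 2 voxels per axis, so input patches must be large enough to survive both
# encoder levels. A hypothetical helper (not from the original code):
def conv_out_size(s, ksize=3, pad=0, stride=1):
    return (s + 2 * pad - ksize) // stride + 1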
def __init__(self, n_hidden, n_out, n_input=None):
    super(BasicGeneratorNetwork, self).__init__()
    with self.init_scope():
        self.l1 = L.Linear(n_input, n_hidden)
        self.l2 = L.BatchNormalization(n_hidden)
        self.l3 = L.Linear(n_hidden, n_out)
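# Hedged forward for BasicGeneratorNetwork (activation choices are
# assumptions; note that despite its name, l2 is a BatchNormalization link):
def generator_forward(gen, z):
    h = F.relu(gen.l2(gen.l1(z)))
    return F.tanh(gen.l3(h))  # tanh output is a common generator convention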
def __init__(self, class_labels=10):
    super(VGG, self).__init__()
    with self.init_scope():
        self.l1_1 = L.Convolution2D(None, 64, 3, pad=1, nobias=True)
        self.b1_1 = L.BatchNormalization(64)
        self.l1_2 = L.Convolution2D(None, 64, 3, pad=1, nobias=True)
        self.b1_2 = L.BatchNormalization(64)
        self.l2_1 = L.Convolution2D(None, 128, 3, pad=1, nobias=True)
        self.b2_1 = L.BatchNormalization(128)
        self.l2_2 = L.Convolution2D(None, 128, 3, pad=1, nobias=True)
        self.b2_2 = L.BatchNormalization(128)
        self.l3_1 = L.Convolution2D(None, 256, 3, pad=1, nobias=True)
        self.b3_1 = L.BatchNormalization(256)
        self.l3_2 = L.Convolution2D(None, 256, 3, pad=1, nobias=True)
        self.b3_2 = L.BatchNormalization(256)
        self.l3_3 = L.Convolution2D(None, 256, 3, pad=1, nobias=True)
        self.b3_3 = L.BatchNormalization(256)
        self.l4_1 = L.Convolution2D(None, 512, 3, pad=1, nobias=True)
        self.b4_1 = L.BatchNormalization(512)
        self.l4_2 = L.Convolution2D(None, 512, 3, pad=1, nobias=True)
        self.b4_2 = L.BatchNormalization(512)
        self.l4_3 = L.Convolution2D(None, 512, 3, pad=1, nobias=True)
        self.b4_3 = L.BatchNormalization(512)
        self.l5_1 = L.Convolution2D(None, 512, 3, pad=1, nobias=True)
        self.b5_1 = L.BatchNormalization(512)
        self.l5_2 = L.Convolution2D(None, 512, 3, pad=1, nobias=True)
        self.b5_2 = L.BatchNormalization(512)
        self.l5_3 = L.Convolution2D(None, 512, 3, pad=1, nobias=True)
        self.b5_3 = L.BatchNormalization(512)
        # self.fc1 = L.Linear(None, 512, nobias=True)
        self.fc1 = L.Linear(None, 128, nobias=True)
        # self.bn_fc1 = L.BatchNormalization(512)
        self.bn_fc1 = L.BatchNormalization(128)
        self.fc2 = L.Linear(None, class_labels, nobias=True)
    self.fc1_out = ''
def __init__(self, in_channels):
    super(PostActivation, self).__init__()
    with self.init_scope():
        self.bn = L.BatchNormalization(size=in_channels)
        self.activ = F.relu
def __init__(self):
    super().__init__(
        conv=L.Convolution2D(3, 32, 3, stride=2, pad=0),
        conv_1=L.Convolution2D(32, 32, 3, stride=1, pad=0),
        conv_2=L.Convolution2D(32, 64, 3, stride=1, pad=1),
        conv_3=L.Convolution2D(64, 80, 1, stride=1, pad=0),
        conv_4=L.Convolution2D(80, 192, 3, stride=1, pad=0),
        bn_conv=L.BatchNormalization(32),
        bn_conv_1=L.BatchNormalization(32),
        bn_conv_2=L.BatchNormalization(64),
        bn_conv_3=L.BatchNormalization(80),
        bn_conv_4=L.BatchNormalization(192),
        mixed=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(192, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(192, 48, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(48)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
                ('bn_conv_1', L.BatchNormalization(64)),
                ('_relu_1', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(192, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(96)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
                ('bn_conv_2', L.BatchNormalization(96)),
                ('_relu_2', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(192, 32, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(32)),
                ('_relu', F.relu)]))]),
        mixed_1=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(256, 48, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(48)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
                ('bn_conv_1', L.BatchNormalization(64)),
                ('_relu_1', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(96)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
                ('bn_conv_2', L.BatchNormalization(96)),
                ('_relu_2', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu)]))]),
        mixed_2=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(288, 48, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(48)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
                ('bn_conv_1', L.BatchNormalization(64)),
                ('_relu_1', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(96)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
                ('bn_conv_2', L.BatchNormalization(96)),
                ('_relu_2', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu)]))]),
        mixed_3=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(288, 384, 3, stride=2, pad=0)),
                ('bn_conv', L.BatchNormalization(384)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(96)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(96, 96, 3, stride=2, pad=0)),
                ('bn_conv_2', L.BatchNormalization(96)),
                ('_relu_2', F.relu)])),
            ('pool', Tower([('_pooling', _max_pooling_2d_320)]))]),
        mixed_4=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(128)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(128, 128, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_1', L.BatchNormalization(128)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(128, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_2', L.BatchNormalization(192)),
                ('_relu_2', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(128)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(128, 128, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_1', L.BatchNormalization(128)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(128, 128, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_2', L.BatchNormalization(128)),
                ('_relu_2', F.relu),
                ('conv_3', L.Convolution2D(128, 128, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_3', L.BatchNormalization(128)),
                ('_relu_3', F.relu),
                ('conv_4', L.Convolution2D(128, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_4', L.BatchNormalization(192)),
                ('_relu_4', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)]))]),
        mixed_5=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(160)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_1', L.BatchNormalization(160)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(160, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_2', L.BatchNormalization(192)),
                ('_relu_2', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(160)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_1', L.BatchNormalization(160)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_2', L.BatchNormalization(160)),
                ('_relu_2', F.relu),
                ('conv_3', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_3', L.BatchNormalization(160)),
                ('_relu_3', F.relu),
                ('conv_4', L.Convolution2D(160, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_4', L.BatchNormalization(192)),
                ('_relu_4', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)]))]),
        mixed_6=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(160)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_1', L.BatchNormalization(160)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(160, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_2', L.BatchNormalization(192)),
                ('_relu_2', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(160)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_1', L.BatchNormalization(160)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_2', L.BatchNormalization(160)),
                ('_relu_2', F.relu),
                ('conv_3', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_3', L.BatchNormalization(160)),
                ('_relu_3', F.relu),
                ('conv_4', L.Convolution2D(160, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_4', L.BatchNormalization(192)),
                ('_relu_4', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)]))]),
        mixed_7=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_1', L.BatchNormalization(192)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_2', L.BatchNormalization(192)),
                ('_relu_2', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_1', L.BatchNormalization(192)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_2', L.BatchNormalization(192)),
                ('_relu_2', F.relu),
                ('conv_3', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_3', L.BatchNormalization(192)),
                ('_relu_3', F.relu),
                ('conv_4', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_4', L.BatchNormalization(192)),
                ('_relu_4', F.relu)])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)]))]),
        mixed_8=Mixed([
            ('tower', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(192, 320, 3, stride=2, pad=0)),
                ('bn_conv_1', L.BatchNormalization(320)),
                ('_relu_1', F.relu)])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
                ('bn_conv_1', L.BatchNormalization(192)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
                ('bn_conv_2', L.BatchNormalization(192)),
                ('_relu_2', F.relu),
                ('conv_3', L.Convolution2D(192, 192, 3, stride=2, pad=0)),
                ('bn_conv_3', L.BatchNormalization(192)),
                ('_relu_3', F.relu)])),
            ('pool', Tower([('_pooling', _max_pooling_2d_320)]))]),
        mixed_9=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(1280, 320, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(320)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(1280, 384, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(384)),
                ('_relu', F.relu),
                ('mixed', Mixed([
                    ('conv', Tower([
                        ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                        ('bn_conv', L.BatchNormalization(384)),
                        ('_relu', F.relu)])),
                    ('conv_1', Tower([
                        ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                        ('bn_conv_1', L.BatchNormalization(384)),
                        ('_relu_1', F.relu)]))]))])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(1280, 448, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(448)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(448, 384, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(384)),
                ('_relu_1', F.relu),
                ('mixed', Mixed([
                    ('conv', Tower([
                        ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                        ('bn_conv', L.BatchNormalization(384)),
                        ('_relu', F.relu)])),
                    ('conv_1', Tower([
                        ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                        ('bn_conv_1', L.BatchNormalization(384)),
                        ('_relu_1', F.relu)]))]))])),
            ('tower_2', Tower([
                ('_pooling', _average_pooling_2d),
                ('conv', L.Convolution2D(1280, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)]))]),
        mixed_10=Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(2048, 320, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(320)),
                ('_relu', F.relu)])),
            ('tower', Tower([
                ('conv', L.Convolution2D(2048, 384, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(384)),
                ('_relu', F.relu),
                ('mixed', Mixed([
                    ('conv', Tower([
                        ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                        ('bn_conv', L.BatchNormalization(384)),
                        ('_relu', F.relu)])),
                    ('conv_1', Tower([
                        ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                        ('bn_conv_1', L.BatchNormalization(384)),
                        ('_relu_1', F.relu)]))]))])),
            ('tower_1', Tower([
                ('conv', L.Convolution2D(2048, 448, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(448)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(448, 384, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(384)),
                ('_relu_1', F.relu),
                ('mixed', Mixed([
                    ('conv', Tower([
                        ('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
                        ('bn_conv', L.BatchNormalization(384)),
                        ('_relu', F.relu)])),
                    ('conv_1', Tower([
                        ('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
                        ('bn_conv_1', L.BatchNormalization(384)),
                        ('_relu_1', F.relu)]))]))])),
            ('tower_2', Tower([
                ('_pooling', _max_pooling_2d),
                ('conv', L.Convolution2D(2048, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)]))]),
        logit=L.Linear(2048, 1008))
def __init__(self, in_channels, out_channels, ksize, pad=(0, 0)):
    super().__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(in_channels, out_channels, ksize, pad=pad,
                                    nobias=False, initialW=LeCunNormal())
        self.bn = L.BatchNormalization(out_channels)
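# Hedged forward (the activation is an assumption; only conv + BN are given):
def conv_bn_forward(block, x):
    return F.relu(block.bn(block.conv(x)))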
def __init__(self, class_labels=10):
    super(DPN92, self).__init__()
    self.k_R = 96
    self.num_init_features = 64
    self.g = 32
    self.k_sec = (3, 4, 20, 3)
    self.inc_sec = (16, 32, 24, 128)

    blocks = OrderedDict()
    blocks['conv1'] = Sequential(
        L.Convolution2D(3, self.num_init_features, ksize=7, stride=2, pad=3,
                        nobias=True),
        L.BatchNormalization(self.num_init_features),
        F.relu,
        MaxPooling2D(ksize=3, stride=2, pad=1))

    bw = 256
    inc = self.inc_sec[0]
    R = int((self.k_R * bw) / 256)
    blocks['conv2_1'] = DualPathBlock(self.num_init_features, R, R, bw, inc,
                                      self.g, 'proj')
    in_chs = bw + 3 * inc
    for i in range(2, self.k_sec[0] + 1):
        blocks['conv2_{}'.format(i)] = DualPathBlock(
            in_chs, R, R, bw, inc, self.g, 'normal')
        in_chs += inc

    bw = 512
    inc = self.inc_sec[1]
    R = int((self.k_R * bw) / 256)
    blocks['conv3_1'] = DualPathBlock(in_chs, R, R, bw, inc, self.g, 'down')
    in_chs = bw + 3 * inc
    for i in range(2, self.k_sec[1] + 1):
        blocks['conv3_{}'.format(i)] = DualPathBlock(
            in_chs, R, R, bw, inc, self.g, 'normal')
        in_chs += inc

    bw = 1024
    inc = self.inc_sec[2]
    R = int((self.k_R * bw) / 256)
    blocks['conv4_1'] = DualPathBlock(in_chs, R, R, bw, inc, self.g, 'down')
    in_chs = bw + 3 * inc
    for i in range(2, self.k_sec[2] + 1):
        blocks['conv4_{}'.format(i)] = DualPathBlock(
            in_chs, R, R, bw, inc, self.g, 'normal')
        in_chs += inc

    bw = 2048
    inc = self.inc_sec[3]
    R = int((self.k_R * bw) / 256)
    blocks['conv5_1'] = DualPathBlock(in_chs, R, R, bw, inc, self.g, 'down')
    in_chs = bw + 3 * inc
    for i in range(2, self.k_sec[3] + 1):
        blocks['conv5_{}'.format(i)] = DualPathBlock(
            in_chs, R, R, bw, inc, self.g, 'normal')
        in_chs += inc

    with self.init_scope():
        self.features = Sequential(blocks)
        self.classifier = L.Linear(in_chs, class_labels)
def __init__(self, depth=18, alpha=16, start_channel=16, skip=False):
    super(PyramidNet, self).__init__()
    channel_diff = float(alpha) / depth
    channel = start_channel
    links = [('bconv1', BatchConv2D(3, channel, 3, 1, 1))]
    skip_size = depth * 3 - 3
    for i in six.moves.range(depth):
        if skip:
            skip_ratio = float(i) / skip_size * 0.5
        else:
            skip_ratio = 0
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)),
                      PyramidBlock(int(round(in_channel)),
                                   int(round(channel)),
                                   skip_ratio=skip_ratio)))
    in_channel = channel
    channel += channel_diff
    links.append(('py{}'.format(len(links)),
                  PyramidBlock(int(round(in_channel)), int(round(channel)),
                               stride=2)))
    for i in six.moves.range(depth - 1):
        if skip:
            skip_ratio = float(i + depth) / skip_size * 0.5
        else:
            skip_ratio = 0
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)),
                      PyramidBlock(int(round(in_channel)),
                                   int(round(channel)),
                                   skip_ratio=skip_ratio)))
    in_channel = channel
    channel += channel_diff
    links.append(('py{}'.format(len(links)),
                  PyramidBlock(int(round(in_channel)), int(round(channel)),
                               stride=2)))
    for i in six.moves.range(depth - 1):
        if skip:
            skip_ratio = float(i + depth * 2 - 1) / skip_size * 0.5
        else:
            skip_ratio = 0
        in_channel = channel
        channel += channel_diff
        links.append(('py{}'.format(len(links)),
                      PyramidBlock(int(round(in_channel)),
                                   int(round(channel)),
                                   skip_ratio=skip_ratio)))
    links.append(('bn{}'.format(len(links)),
                  L.BatchNormalization(int(round(channel)))))
    links.append(('_relu{}'.format(len(links)), F.ReLU()))
    links.append(('_apool{}'.format(len(links)),
                  F.AveragePooling2D(8, 1, 0, False, True)))
    links.append(('fc{}'.format(len(links)),
                  L.Linear(int(round(channel)), 10)))

    # names starting with '_' are plain functions, not registered links
    for name, f in links:
        if not name.startswith('_'):
            self.add_link(name, f)
    self.layers = links
def __init__(self, feature_map_nc, output_nc, w_init=None):
    super(Generator, self).__init__(
        c1=L.Convolution2D(None, feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c2=L.Convolution2D(None, 2 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c3=L.Convolution2D(None, 4 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c4=L.Convolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c5=L.Convolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c6=L.Convolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c7=L.Convolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        c8=L.Convolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc1=L.Deconvolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc2=L.Deconvolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc3=L.Deconvolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc4=L.Deconvolution2D(None, 8 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc5=L.Deconvolution2D(None, 4 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc6=L.Deconvolution2D(None, 2 * feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc7=L.Deconvolution2D(None, feature_map_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        dc8=L.Deconvolution2D(None, output_nc, ksize=4, stride=2, pad=1, initialW=w_init),
        b2=L.BatchNormalization(2 * feature_map_nc),
        b3=L.BatchNormalization(4 * feature_map_nc),
        b4=L.BatchNormalization(8 * feature_map_nc),
        b5=L.BatchNormalization(8 * feature_map_nc),
        b6=L.BatchNormalization(8 * feature_map_nc),
        b7=L.BatchNormalization(8 * feature_map_nc),
        b8=L.BatchNormalization(8 * feature_map_nc),
        b1_d=L.BatchNormalization(8 * feature_map_nc),
        b2_d=L.BatchNormalization(8 * feature_map_nc),
        b3_d=L.BatchNormalization(8 * feature_map_nc),
        b4_d=L.BatchNormalization(8 * feature_map_nc),
        b5_d=L.BatchNormalization(4 * feature_map_nc),
        b6_d=L.BatchNormalization(2 * feature_map_nc),
        b7_d=L.BatchNormalization(feature_map_nc))
def __init__(self):
    super(ResidualBlock_64, self).__init__(
        L.Convolution2D(64, 64, 3, pad=1),
        L.BatchNormalization(64),
        L.Convolution2D(64, 64, 3, pad=1),
        L.BatchNormalization(64))
def __init__(self, init_ch=6, ch=8, out_ch=3, activation=F.relu,
             distribution="normal", batch_size=64, dim_z=3, bottom_size=32):
    super(Generator, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    # initializer_u = chainer.initializers.Uniform(scale=1)
    # initializer_v = chainer.initializers.Uniform(scale=1)
    self.activation = activation
    self.distribution = distribution
    self.batch_size = batch_size
    self.dim_z = dim_z
    self.ch = ch
    with self.init_scope():
        # Encoder
        self.enc1 = Block(init_ch, ch, activation=activation,
                          batch_size=batch_size, is_shortcut=True, dim_z=dim_z)
        self.enc2 = Block(ch, ch * 2, activation=activation,
                          batch_size=batch_size, is_shortcut=True, dim_z=dim_z)
        self.enc3 = Block(ch * 2, ch * 2, activation=activation,
                          batch_size=batch_size, is_shortcut=True, dim_z=dim_z)
        self.linear = L.Linear(ch * 2 * (bottom_size * bottom_size),
                               ch * 2 * (bottom_size * bottom_size))
        # WIP: I have not finished implementing this.
        # This code would reduce the dimensionality of the bottleneck.
        # self.linear = SVDLinear(ch * 4 * (bottom_size * bottom_size),
        #                         (ch * 4 * (bottom_size * bottom_size)),
        #                         k=(bottom_size * bottom_size * ch * 4),
        #                         initialU=initializer_u, initialV=initializer_v)
        self.b4 = L.BatchNormalization(ch * 2 * (bottom_size * bottom_size))
        # Decoder
        self.dec1 = Block(ch * 2, ch * 2, activation=activation,
                          batch_size=batch_size, is_shortcut=False, dim_z=dim_z)
        self.dec2 = Block(ch * 2, ch, activation=activation,
                          batch_size=batch_size, is_shortcut=False, dim_z=dim_z)
        self.b8 = L.BatchNormalization(ch)
        self.l8 = L.Convolution2D(ch, out_ch, ksize=3, stride=1, pad=1,
                                  initialW=initializer)