def __init__(self, channels_z):
    """Two parallel heads producing the mean and log-variance maps of z."""
    super().__init__()

    def head():
        # 5x5, stride 1, "same" padding; input channels inferred lazily.
        return L.Convolution2D(None, channels_z, ksize=5, stride=1, pad=2,
                               initialW=HeNormal(0.1))

    with self.init_scope():
        self.mean_z = head()
        self.ln_var_z = head()
def __init__(self, channels):
    """Two stride-2 convolutions, each halving spatial resolution (4x total)."""
    super().__init__()

    def halve(out_ch):
        # 2x2 kernel with stride 2 and no padding: exact 2x downsampling.
        return L.Convolution2D(None, out_ch, ksize=2, stride=2, pad=0,
                               initialW=HeNormal(0.1))

    with self.init_scope():
        self.conv_x_1 = halve(channels // 2)
        self.conv_x_2 = halve(channels)
def __init__(self, opt):
    """SPADE-style coarse-to-fine image generator.

    The label map is resized down to the decoder's starting grid, lifted to
    ngf*8 channels, then passed through seven SPADEResBlocks interleaved with
    six 2x upsamplings, and finally projected to a 3-channel RGB image.

    Args:
        opt: options object. Fields read here: img_shape (C, H, W), ngf,
            class_num, c_shape; sub-links and define_conv/define_upsampling
            read further fields of their own.
    """
    super().__init__()
    # NOTE(review): named "xavier" but Normal() is a plain Gaussian
    # initializer, not Glorot/Xavier — confirm intent.
    xavier_w = Normal()
    he_w = HeNormal()
    C, H, W = opt.img_shape
    ngf = opt.ngf
    label_ch = opt.class_num
    self.opt = opt
    layer_num = 6
    # Spatial size after six halvings (e.g. 256x256 input -> 4x4).
    init_shape = (ngf * 8, H // 2**layer_num, W // 2**layer_num)
    with self.init_scope():
        self.w1 = ConstantFCN(opt, opt.class_num)
        self.w2 = ConstantFCN(opt, opt.c_shape[0])
        # Shrink the conditioning map to the decoder's starting resolution.
        self.head_reshape = lambda x: F.resize_images(x, init_shape[1:])
        self.head = define_conv(opt)(label_ch, init_shape[0], ksize=3, pad=1,
                                     initialW=he_w)
        # Shape comments below assume ngf=64 and a 256x256 output.
        #512 x 4 x 4
        self.r1 = SPADEResBlock(opt, ngf * 8, ngf * 8)
        self.up1 = define_upsampling(opt, ngf * 8)
        #512 x 8 x 8
        self.r2 = SPADEResBlock(opt, ngf * 8, ngf * 8)
        self.up2 = define_upsampling(opt, ngf * 8)
        #512 x 16 x 16
        self.r3 = SPADEResBlock(opt, ngf * 8, ngf * 8)
        self.up3 = define_upsampling(opt, ngf * 8)
        #512 x 32 x 32
        self.r4 = SPADEResBlock(opt, ngf * 8, ngf * 4)
        self.up4 = define_upsampling(opt, ngf * 4)
        #256 x 64 x 64
        self.r5 = SPADEResBlock(opt, ngf * 4, ngf * 2)
        self.up5 = define_upsampling(opt, ngf * 2)
        #128 x 128 x 128
        self.r6 = SPADEResBlock(opt, ngf * 2, ngf)
        self.up6 = define_upsampling(opt, ngf)
        #64 x 256 x 256
        self.r7 = SPADEResBlock(opt, ngf, ngf // 2)
        #32 x 256 x 256
        self.to_img = L.Convolution2D(ngf // 2, 3, ksize=3, pad=1,
                                      initialW=xavier_w)
def __init__(self, in_channels, out_channels):
    """7x7 stride-2 convolution block (halves spatial resolution)."""
    super(ConvolutionBlock, self).__init__()
    weight = HeNormal()
    with self.init_scope():
        self.conv = Convolution2D(in_channels, out_channels, ksize=7,
                                  stride=2, pad=3, initialW=weight)
def __init__(self, channels):
    """Learned 4x spatial upsampler: 4x4 deconvolution with stride 4."""
    super().__init__()
    weight = HeNormal(0.1)
    with self.init_scope():
        self.deconv = nn.Deconvolution2D(None, channels, ksize=4, stride=4,
                                         pad=0, initialW=weight)
def __init__(self):
    """1x1 convolution projecting features to a 3-channel mean image."""
    super().__init__()
    weight = HeNormal(0.1)
    with self.init_scope():
        self.mean_x = L.Convolution2D(None, 3, ksize=1, stride=1, pad=0,
                                      initialW=weight)
def __init__(self, z_channels):
    """Single 5x5 conv emitting 2*z_channels maps (presumably the mean and
    ln_var halves concatenated — confirm against the forward pass)."""
    super().__init__()
    weight = HeNormal(0.1)
    with self.init_scope():
        self.conv = nn.Convolution2D(None, z_channels * 2, ksize=5, stride=1,
                                     pad=2, initialW=weight)
def __init__(self, opt, input_ch, output_ch, rate=2):
    """Sub-pixel (pixel-shuffle) upsampler: a conv expands channels by
    rate**2, then depth2space rearranges them into a rate-x larger image."""
    super().__init__()
    he_w = HeNormal()
    expanded_ch = output_ch * rate**2  # channels before the shuffle
    with self.init_scope():
        self.c = define_conv(opt)(input_ch, expanded_ch, ksize=3, stride=1,
                                  pad=1, initialW=he_w)
        self.ps_func = lambda x: F.depth2space(x, rate)
def __init__(self, in_channels, out_channels):
    """Downsampling residual block: stride-2 1x1 projection shortcut plus a
    stride-2 3x3 -> 3x3 main branch."""
    super(ResidualBlockB, self).__init__()

    def conv(in_ch, out_ch, **kw):
        return Convolution2D(in_ch, out_ch, initialW=HeNormal(), **kw)

    with self.init_scope():
        # Projection shortcut matching the new channel count and stride.
        self.res_branch1 = conv(in_channels, out_channels, ksize=1, stride=2)
        # Main branch.
        self.res_branch2a = conv(in_channels, out_channels, ksize=3,
                                 stride=2, pad=1)
        self.res_branch2b = conv(out_channels, out_channels, ksize=3, pad=1)
def __init__(self):
    """Generator: input conv, ten residual blocks with doubling dilation
    (2, 4, ..., 1024), two 1x1 output convs, and a 108-way embedding."""
    super(Generator, self).__init__()
    with self.init_scope():
        self.conv0 = L.Convolution2D(1, 256, ksize=(1, 2))
        for i in range(10):
            self.add_link("resBlock{}".format(i),
                          ResBlock(256, 256, 2**(i + 1)))
        self.conv1 = L.Convolution2D(256, 256, 1)
        self.conv2 = L.Convolution2D(256, 1, 1)
        # 108 ids -> 8-dim embedding (presumably a conditioning label).
        self.id = L.EmbedID(108, 8, initialW=HeNormal())
def __init__(self, n_out=7):
    """VGG16-based classifier for 6-channel input: a 3x3 conv squeezes six
    channels down to the three VGG expects, then a 4096->n_out linear head."""
    links = dict(
        conv=L.Convolution2D(6, 3, ksize=3, stride=1, pad=1,
                             initialW=HeNormal()),
        model=L.VGG16Layers(),
        fc=L.Linear(4096, n_out),
    )
    super(VGG_double, self).__init__(**links)
def define_upsampling(opt, input_ch, output_ch=None):
    """Build a 2x upsampling module according to ``opt.upsampling_mode``.

    Modes:
        'bilinear' / 'nearest': fixed-function resize, optionally followed by
            a 3x3 conv when ``output_ch`` is given (to change channel count).
        'deconv': learned 3x3 transposed convolution.
        'subpx_conv': sub-pixel (pixel-shuffle) convolution.

    Args:
        opt: options object; ``upsampling_mode`` selects the variant, and
            define_conv/define_deconv read further fields.
        input_ch: number of input channels.
        output_ch: desired output channels; defaults to ``input_ch``.

    Returns:
        A chainer link/Sequential performing the upsampling.

    Raises:
        ValueError: on an unrecognized ``upsampling_mode`` (the previous
            version silently returned None, which crashed far from the cause).
    """
    mode = opt.upsampling_mode
    if mode in ('bilinear', 'nearest'):
        # The two resize modes shared duplicated code; F.resize_images takes
        # the interpolation mode directly, so one branch covers both.
        seq = Sequential(lambda x: F.resize_images(
            x, (x.shape[2] * 2, x.shape[3] * 2), mode=mode))
        if output_ch is not None:
            seq.append(define_conv(opt)(input_ch, output_ch, ksize=3,
                                        stride=1, pad=1, initialW=HeNormal()))
        return seq
    out_ch = input_ch if output_ch is None else output_ch
    if mode == 'deconv':
        return define_deconv(opt)(input_ch, out_ch, ksize=3, stride=1, pad=1,
                                  initialW=HeNormal())
    if mode == 'subpx_conv':
        return PixelShuffler(opt, input_ch, out_ch)
    raise ValueError('unknown upsampling_mode: {!r}'.format(mode))
def __init__(self, channels):
    """Two-stage conv tower with residual branches: stage 1 downsamples by
    stride-2 convs, stage 2 refines at constant resolution."""
    super().__init__()

    def conv(out_ch, ksize, pad, stride):
        return L.Convolution2D(None, out_ch, ksize=ksize, pad=pad,
                               stride=stride, initialW=HeNormal(0.1))

    with self.init_scope():
        # Stage 1: 2x downsampling path plus a parallel residual projection.
        self.conv1_1 = conv(channels, 2, 0, 2)
        self.conv1_2 = conv(channels // 2, 3, 1, 1)
        self.conv1_res = conv(channels, 2, 0, 2)
        self.conv1_3 = conv(channels, 2, 0, 2)
        # Stage 2: resolution-preserving refinement plus residual projection.
        self.conv2_1 = conv(channels // 2, 3, 1, 1)
        self.conv2_2 = conv(channels, 3, 1, 1)
        self.conv2_res = conv(channels, 3, 1, 1)
        self.conv2_3 = conv(channels, 1, 0, 1)
def __init__(self, opt, ch):
    """Self-attention block (SAGAN-style): f/g/h project into a reduced
    embedding, v projects back, gamma gates the residual contribution."""
    super().__init__()
    weight = HeNormal()
    mid_ch = ch // opt.division_ch  # reduced embedding width
    with self.init_scope():
        self.f_conv = define_conv(opt)(ch, mid_ch, ksize=1, initialW=weight)
        self.g_conv = define_conv(opt)(ch, mid_ch, ksize=1, initialW=weight)
        self.h_conv = define_conv(opt)(ch, mid_ch, ksize=1, initialW=weight)
        self.v_conv = define_conv(opt)(mid_ch, ch, ksize=1, initialW=weight)
        # Learnable scalar gate, starts at 0 so attention is initially off.
        self.gamma = Parameter(initializer=0, shape=1, name='SA-gamma')
def __init__(self):
    """Model: an input conv, two towers of Conv blocks (32- and 128-channel),
    and two groups of linear heads (2-way and 111-way outputs)."""
    super(Model, self).__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(1, 32, ksize=(1, 4), stride=2,
                                    pad=(0, 1))
        # Tower 1: eight 32-channel blocks.
        for i in range(8):
            self.add_link("conv{}".format(i), Conv(32, 32))
        # Tower 2: widen to 128 channels, then seven 128-channel blocks.
        self.add_link("conv0_", Conv(32, 128))
        for i in range(1, 8):
            self.add_link("conv{}_".format(i), Conv(128, 128))
        # Head A: 64 -> 64 -> 2.
        self.l1 = L.Linear(64, 64, initialW=HeNormal())
        self.l2 = L.Linear(64, 64, initialW=HeNormal())
        self.l3 = L.Linear(64, 2)
        # Head B: 512 -> 256 -> 111.
        self.l4 = L.Linear(512, 512, initialW=HeNormal())
        self.l5 = L.Linear(512, 256, initialW=HeNormal())
        self.l6 = L.Linear(256, 111, initialW=HeNormal())
def __init__(self, channels_chz, channels_u):
    """Conv-LSTM core: four 5x5 gate convolutions plus a learned 4x deconv
    that maps the hidden state into the canvas u."""
    super().__init__()

    def gate_conv():
        # Same geometry for every gate: 5x5, stride 1, "same" padding.
        return L.Convolution2D(None, channels_chz, ksize=5, stride=1, pad=2,
                               initialW=HeNormal(0.1))

    with self.init_scope():
        self.lstm_tanh = gate_conv()  # candidate cell update
        self.lstm_i = gate_conv()     # input gate
        self.lstm_f = gate_conv()     # forget gate
        self.lstm_o = gate_conv()     # output gate
        self.deconv_h = L.Deconvolution2D(None, channels_u, ksize=4,
                                          stride=4, pad=0,
                                          initialW=HeNormal(0.1))
def __init__(self, chz_channels, batchnorm_enabled, batchnorm_steps):
    """Conv-GRU core with optional per-timestep batch normalization.

    When enabled, one BatchNormalization link is created per gate per
    timestep, so statistics are not shared across timesteps.
    """
    super().__init__()

    def gate_conv():
        return nn.Convolution2D(None, chz_channels, ksize=5, stride=1, pad=2,
                                initialW=HeNormal(0.1))

    with self.init_scope():
        self.gru_u = gate_conv()
        self.gru_r = gate_conv()
        self.gru_tanh = gate_conv()
        if not batchnorm_enabled:
            self.batchnorm_r_array = None
            self.batchnorm_u_array = None
            self.batchnorm_tanh_array = None
        else:
            bn_r = chainer.ChainList()
            bn_u = chainer.ChainList()
            bn_tanh = chainer.ChainList()
            for _ in range(batchnorm_steps):
                bn_r.append(nn.BatchNormalization(chz_channels))
                bn_u.append(nn.BatchNormalization(chz_channels))
                bn_tanh.append(nn.BatchNormalization(chz_channels))
            self.batchnorm_r_array = bn_r
            self.batchnorm_u_array = bn_u
            self.batchnorm_tanh_array = bn_tanh
def __init__(self, input_channels, inner_channels, output_channels,
             normalization, stride=2):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with a projection
    shortcut; every conv is bias-free and followed by ``normalization``."""
    super().__init__()

    def conv(in_ch, out_ch, ksize, s, pad):
        return L.Convolution2D(in_ch, out_ch, ksize=ksize, stride=s, pad=pad,
                               initialW=HeNormal(), nobias=True)

    with self.init_scope():
        # Main branch: reduce -> transform -> expand.
        self.conv1 = conv(input_channels, inner_channels, 1, stride, 0)
        self.norm1 = normalization(inner_channels, eps=1e-5)
        self.conv2 = conv(inner_channels, inner_channels, 3, 1, 1)
        self.norm2 = normalization(inner_channels, eps=1e-5)
        self.conv3 = conv(inner_channels, output_channels, 1, 1, 0)
        self.norm3 = normalization(output_channels, eps=1e-5)
        # Projection shortcut matching channels and stride.
        self.conv4 = conv(input_channels, output_channels, 1, stride, 0)
        self.norm4 = normalization(output_channels, eps=1e-5)
def __init__(self, opt, in_ch, out_ch, out_conv_initW=HeNormal()):
    """SPADE residual block: two noise+SPADE+conv stages plus a shortcut.

    When the input and output channel counts match, the shortcut is a pure
    identity; otherwise it gets its own noise/SPADE/activation/1x1-conv path
    to project the input to ``out_ch`` channels.
    """
    super().__init__()
    he_w = HeNormal()
    with self.init_scope():
        self.noise1 = NoiseAdder(in_ch)
        self.norm1 = SPADE(opt, in_ch)
        self.conv1 = define_conv(opt)(in_ch, out_ch, ksize=3, pad=1,
                                      initialW=he_w)
        self.noise2 = NoiseAdder(out_ch)
        self.norm2 = SPADE(opt, out_ch)
        self.conv2 = define_conv(opt)(out_ch, out_ch, ksize=3, pad=1,
                                      initialW=out_conv_initW)
        self.activation = F.leaky_relu
        if in_ch == out_ch:
            # Identity shortcut: pass-throughs mimicking the call signatures
            # of the projection path.
            self.reshape_noise = lambda x, mean=None, ln_var=None: x
            self.reshape_norm = lambda x, y: x
            self.reshape_act = lambda x: x
            self.reshape_conv = lambda x: x
        else:
            # Projection shortcut converting in_ch -> out_ch.
            self.reshape_noise = NoiseAdder(in_ch)
            self.reshape_norm = SPADE(opt, in_ch)
            self.reshape_act = self.activation
            self.reshape_conv = define_conv(opt)(in_ch, out_ch, ksize=1,
                                                 initialW=out_conv_initW,
                                                 nobias=True)
def __init__(self, channels_r):
    """Two-stage conv tower with residual branches, registered through the
    legacy keyword-argument Chain constructor."""
    def conv(out_ch, ksize, pad, stride):
        return L.Convolution2D(None, out_ch, ksize=ksize, pad=pad,
                               stride=stride, initialW=HeNormal(0.1))

    super().__init__(
        # Stage 1: 2x downsampling path plus residual projection.
        conv1_1=conv(channels_r, 2, 0, 2),
        conv1_2=conv(channels_r // 2, 3, 1, 1),
        conv1_res=conv(channels_r, 2, 0, 2),
        conv1_3=conv(channels_r, 2, 0, 2),
        # Stage 2: resolution-preserving refinement plus residual projection.
        conv2_1=conv(channels_r // 2, 3, 1, 1),
        conv2_2=conv(channels_r, 3, 1, 1),
        conv2_res=conv(channels_r, 3, 1, 1),
        conv2_3=conv(channels_r, 1, 0, 1),
    )
def __init__(self, chz_channels, channels_u):
    """Conv-LSTM core plus latent (mean_z/ln_var_z) and observation (mean_x)
    heads and a 4x deconv to the canvas u, registered through the legacy
    keyword-argument Chain constructor."""
    def conv5x5(out_ch):
        # Shared geometry for all gate and latent heads.
        return L.Convolution2D(None, out_ch, ksize=5, stride=1, pad=2,
                               initialW=HeNormal(0.1))

    super().__init__(
        lstm_tanh=conv5x5(chz_channels),
        lstm_i=conv5x5(chz_channels),
        lstm_f=conv5x5(chz_channels),
        lstm_o=conv5x5(chz_channels),
        mean_z=conv5x5(chz_channels),
        ln_var_z=conv5x5(chz_channels),
        mean_x=L.Convolution2D(None, 3, ksize=1, stride=1, pad=0,
                               initialW=HeNormal(0.1)),
        deconv_u=L.Deconvolution2D(None, channels_u, ksize=4, stride=4,
                                   pad=0, initialW=HeNormal(0.1)),
    )
def __init__(self, output_dim, init_weights=False, filter_height=1):
    """Complex-valued AlexNet-style classifier.

    Bug fix: the ``init_weights=True`` branch previously omitted conv3-conv5
    and fc8 entirely and wired fc7 straight to ``output_dim``, so the
    weight-initialised variant built a different (and incomplete) network
    from the default one — any forward pass touching conv3 or fc8 would
    raise AttributeError.  Both branches now build the identical layer
    stack; ``init_weights`` controls only the initialisers.  The complex
    batch-norm links existed only in the initialised branch in the original
    and are kept there to avoid changing the default variant.

    Args:
        output_dim: number of output classes/units of the final linear layer.
        init_weights: apply ComplexInitial/HeNormal initialisers when True.
        filter_height: kernel height of conv2.
    """
    super(ComplexSmallNN, self).__init__()
    self.output_dim = output_dim
    self.filter_height = filter_height
    with self.init_scope():
        if init_weights:
            self.conv1 = ComplexConv2D(None, 96, (1, 3), 1, (0, 1),
                                       initialW=ComplexInitial())
            self.conv2 = ComplexConv2D(None, 256, (filter_height, 3), 1,
                                       (0, 1), initialW=ComplexInitial())
            self.conv3 = ComplexConv2D(None, 384, (1, 3), 1, (0, 1),
                                       initialW=ComplexInitial())
            self.conv4 = ComplexConv2D(None, 384, (1, 3), 1, (0, 1),
                                       initialW=ComplexInitial())
            self.conv5 = ComplexConv2D(None, 256, (1, 3), 1, (0, 1),
                                       initialW=ComplexInitial())
            self.fc6 = L.Linear(None, 4096, initialW=HeNormal())
            self.fc7 = L.Linear(None, 4096, initialW=HeNormal())
            self.fc8 = L.Linear(None, output_dim, initialW=HeNormal())
            self.bn1 = ComplexBatchNorm(96)
            self.bn2 = ComplexBatchNorm(256)
        else:
            self.conv1 = ComplexConv2D(None, 96, (1, 3), 1, (0, 1))
            self.conv2 = ComplexConv2D(None, 256, (filter_height, 3), 1,
                                       (0, 1))
            self.conv3 = ComplexConv2D(None, 384, (1, 3), 1, (0, 1))
            self.conv4 = ComplexConv2D(None, 384, (1, 3), 1, (0, 1))
            self.conv5 = ComplexConv2D(None, 256, (1, 3), 1, (0, 1))
            self.fc6 = L.Linear(None, 4096)
            self.fc7 = L.Linear(None, 4096)
            self.fc8 = L.Linear(None, output_dim)
def __init__(self, output_dim, init_weights=False, filter_height=1):
    """AlexNet-style classifier over (1 x k)-kernel convolutions.

    Refactor: the two branches duplicated the entire layer stack and
    differed only in ``initialW``.  Since Chainer treats ``initialW=None``
    as "use the default initializer" (identical to omitting the argument),
    the branches collapse into a single stack whose initialisers are chosen
    up front.

    Args:
        output_dim: number of output units of fc8.
        init_weights: use GlorotNormal for convs and HeNormal for linears
            when True; Chainer defaults otherwise.
        filter_height: kernel height of conv2.
    """
    super(Alex, self).__init__()
    self.output_dim = output_dim
    self.filter_height = filter_height
    # Factories so each layer still receives its own initializer instance,
    # as in the original.
    conv_w = GlorotNormal if init_weights else (lambda: None)
    fc_w = HeNormal if init_weights else (lambda: None)
    with self.init_scope():
        self.conv1 = L.Convolution2D(None, 96, (1, 3), 1, (0, 1),
                                     initialW=conv_w())
        self.conv2 = L.Convolution2D(None, 256, (filter_height, 3), 1,
                                     (0, 1), initialW=conv_w())
        self.conv3 = L.Convolution2D(None, 384, (1, 3), 1, (0, 1),
                                     initialW=conv_w())
        self.conv4 = L.Convolution2D(None, 384, (1, 3), 1, (0, 1),
                                     initialW=conv_w())
        self.conv5 = L.Convolution2D(None, 256, (1, 3), 1, (0, 1),
                                     initialW=conv_w())
        self.fc6 = L.Linear(None, 4096, initialW=fc_w())
        self.fc7 = L.Linear(None, 4096, initialW=fc_w())
        self.fc8 = L.Linear(None, output_dim, initialW=fc_w())
def __init__(self, output_channels, normalization):
    """ResNet input stem: 7x7 stride-2 convolution followed by normalization.

    The convolution is deliberately bias-free: the original ResNet-50
    prototxt has a bias term in conv1, but ResNet-101/152 do not — the bias
    is likely a mistake there (and is redundant before normalization), so
    this implementation omits it.
    https://github.com/KaimingHe/deep-residual-networks/blob/master/prototxt/ResNet-50-deploy.prototxt
    """
    super().__init__()
    with self.init_scope():
        self.conv = L.Convolution2D(None, output_channels, ksize=7, stride=2,
                                    pad=3, initialW=HeNormal(), nobias=True)
        self.norm = normalization(output_channels, eps=1e-5)
def __init__(self, n_class, scales):
    """Mask head: four 3x3 conv+activation layers, a 2x deconvolution
    upsample, and a per-class 1x1 segmentation convolution.

    Note on argument order: Chainer's Convolution2D/Deconvolution2D shift
    positional arguments when ksize is omitted, so
    ``L.Deconvolution2D(256, 2, ...)`` means out_channels=256, ksize=2
    (in_channels inferred), and ``L.Convolution2D(n_class, 1, ...)`` means
    out_channels=n_class, ksize=1.

    Args:
        n_class: number of classes, i.e. output channels of ``seg``.
        scales: stored on the instance; used elsewhere in the class.
    """
    super(MaskHead, self).__init__()
    # gain=1 with fan_out matches the common Kaiming setup for conv layers.
    initialW = HeNormal(1, fan_option='fan_out')
    with self.init_scope():
        self.conv1 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv2 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv3 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        self.conv4 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
        # 2x2 stride-2 deconv: doubles spatial resolution before seg.
        self.conv5 = L.Deconvolution2D(
            256, 2, pad=0, stride=2, initialW=initialW)
        self.seg = L.Convolution2D(n_class, 1, pad=0, initialW=initialW)
    self._n_class = n_class
    self._scales = scales
def __init__(self, channels_x, channels_h):
    """Three-layer conv net from channels_x up to channels_h and back out to
    2*channels_x maps (presumably a mean/ln_var pair — confirm in forward).

    The last conv is zero-initialised, so the block initially outputs zeros.
    """
    super().__init__()
    self.channels_x = channels_x
    self.channels_h = channels_h
    he = HeNormal(0.1)
    with self.init_scope():
        self.conv_1 = L.Convolution2D(channels_x, channels_h, ksize=3,
                                      stride=1, pad=1, initialW=he)
        self.conv_2 = L.Convolution2D(channels_h, channels_h, ksize=1,
                                      stride=1, pad=0, initialW=he)
        self.conv_3 = L.Convolution2D(channels_h, channels_x * 2, ksize=3,
                                      stride=1, pad=1, initialW=Zero())
def __init__(self):
    """Inception-ResNet-style classifier over 2-channel input.

    NOTE(review): several convs expect more input channels than the previous
    conv produces (e.g. conv3_0 takes 128 while conv2 emits 64; conv5_0
    takes 256; conv6_0 takes 768) — presumably the forward pass concatenates
    parallel branches before these layers.  Confirm against the (unseen)
    forward implementation.
    """
    super(Model_, self).__init__()
    with self.init_scope():
        # self.convBlock=compressor
        self.conv0 = L.Convolution2D(2, 32, ksize=3, stride=2)
        self.conv1 = L.Convolution2D(32, 64, ksize=3, stride=1)
        self.conv2 = L.Convolution2D(64, 64, ksize=3, stride=2)
        # Inception-style stem branch A (1x1 bottleneck then stride-2 3x3).
        self.conv3_0 = L.Convolution2D(128, 64, ksize=1, stride=1)
        self.conv3_1 = L.Convolution2D(64, 64, ksize=3, stride=(1, 2))
        # Stem branch B: 1x1 bottleneck, factorised 1x7/7x1 convs, stride-2 3x3.
        self.conv3_2 = L.Convolution2D(128, 64, ksize=1, stride=1)
        self.conv3_3 = L.Convolution2D(64, 64, ksize=(1, 7), stride=1,
                                       pad=(0, 3))
        self.conv3_4 = L.Convolution2D(64, 64, ksize=(7, 1), stride=1,
                                       pad=(3, 0))
        self.conv3_5 = L.Convolution2D(64, 64, ksize=3, stride=(1, 2))
        self.conv4 = L.Convolution2D(128, 128, ksize=3, stride=2)
        # Reduction block at 256 channels.
        self.conv5_0 = L.Convolution2D(256, 256, ksize=3, stride=2)
        self.conv5_1 = L.Convolution2D(256, 256, ksize=1, stride=1)
        self.conv5_2 = L.Convolution2D(256, 256, ksize=3, stride=1, pad=1)
        self.conv5_3 = L.Convolution2D(256, 256, ksize=3, stride=2)
        # Reduction block at 768 (= 256*3) channels, three parallel paths.
        self.conv6_0 = L.Convolution2D(768, 256, ksize=1, stride=1)
        self.conv6_1 = L.Convolution2D(256, 256, ksize=3, stride=2)
        self.conv6_2 = L.Convolution2D(768, 256, ksize=1, stride=1)
        self.conv6_3 = L.Convolution2D(256, 256, ksize=3, stride=2)
        self.conv6_4 = L.Convolution2D(768, 256, ksize=1, stride=1)
        self.conv6_5 = L.Convolution2D(256, 256, ksize=3, stride=1, pad=1)
        self.conv6_6 = L.Convolution2D(256, 256, ksize=3, stride=2)
        # self.conv = L.Convolution2D(, , ksize=3, stride=2)
        # Final 10-way classification head.
        self.l0 = L.Linear(1536, 10, initialW=HeNormal())
        for i in range(3):
            self.add_link(f"resBlockA{i}", Inception_ResNet_A())
        # self.add_link(f"resBlockB0", Inception_ResNet_B(256*3))
        for i in range(5):
            self.add_link(f"resBlockB{i}",
                          Inception_ResNet_B(256 * 3, 256 * 3))
        # self.add_link(f"resBlockC0", Inception_ResNet_B(256*3))
        # NOTE(review): the "C" blocks reuse Inception_ResNet_B (at 1536
        # channels) rather than a dedicated C block — confirm intentional.
        for i in range(3):
            self.add_link(f"resBlockC{i}", Inception_ResNet_B(1536, 1536))
        # self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
        # Hann window kept on the model; length 235 presumably matches the
        # input frame size — confirm against preprocessing.
        self.window = xp.hanning(235).astype(xp.float32)
def build_generation_network(self, generation_steps, h_channels, z_channels,
                             u_channels):
    """Build the GQN generation network's sub-modules.

    Creates the LSTM cores, z-prior samplers, and h->u upsamplers — one per
    generation step, or a single shared instance when the corresponding
    ``generator_share_*`` hyperparameter is set — plus an optional 1x1 conv
    mapping the canvas u to a 3-channel image.

    Returns:
        (core_array, prior_array, upsampler_h_u_array, map_u_x) where
        map_u_x is None when u already has 3 channels.
    """
    core_array = []
    prior_array = []
    upsampler_h_u_array = []
    with self.parameters.init_scope():
        # LSTM core
        num_cores = (1 if self.hyperparams.generator_share_core
                     else generation_steps)
        for _ in range(num_cores):
            core = gqn.nn.generator.Core(h_channels=h_channels)
            core_array.append(core)
            self.parameters.append(core)
        # z prior sampler
        num_priors = (1 if self.hyperparams.generator_share_prior
                      else generation_steps)
        for _ in range(num_priors):
            prior = gqn.nn.generator.Prior(z_channels=z_channels)
            prior_array.append(prior)
            self.parameters.append(prior)
        # upsampler (h -> u): either sub-pixel convolution (channels are
        # expanded by scale**2 then pixel-shuffled) or a plain deconvolution.
        num_upsamplers = (1 if self.hyperparams.generator_share_upsampler
                          else generation_steps)
        scale = 4
        for _ in range(num_upsamplers):
            if self.hyperparams.generator_subpixel_convolution_enabled:
                upsampler = gqn.nn.upsampler.SubPixelConvolutionUpsampler(
                    channels=u_channels * scale**2, scale=scale)
            else:
                upsampler = gqn.nn.upsampler.DeconvolutionUpsampler(
                    channels=u_channels)
            upsampler_h_u_array.append(upsampler)
            self.parameters.append(upsampler)
        # 1x1 conv (u -> x); skipped when u is already 3-channel.
        if u_channels == 3:
            map_u_x = None
        else:
            map_u_x = nn.Convolution2D(u_channels, 3, ksize=1, stride=1,
                                       pad=0, initialW=HeNormal(0.1))
            # NOTE(review): the append is placed inside this branch because
            # appending None to a ChainList would raise — confirm against the
            # original (flattened) source.
            self.parameters.append(map_u_x)
    return core_array, prior_array, upsampler_h_u_array, map_u_x