def __init__(self, base=128):
    """Build the Generator's layer stack.

    Args:
        base (int): base channel width; wider layers scale from it.
    """
    w = initializers.Normal(0.02)
    super(Generator, self).__init__()
    with self.init_scope():
        # 2D front-end over the input map, then two downsampling blocks.
        self.c0 = L.Convolution2D(1, base, (15, 5), 1, (7, 2), initialW=w)
        self.cbg0 = C2BG(int(base / 2), base * 2, down=True)
        self.cbg1 = C2BG(base, base * 4, down=True)
        self.cc2 = L.Convolution2D(base * 2, base * 4, (3, 4), (1, 2), (1, 1),
                                   initialW=w)
        self.bb2 = L.BatchNormalization(base * 4)
        # 1x1 conv squeezes the flattened 2D features into a 1D sequence.
        self.c1 = L.Convolution1D(2560, base * 2, 1, 1, 0, initialW=w)
        self.bn1 = L.BatchNormalization(base * 2)
        # Bottleneck: six residual blocks (res0 .. res5).
        for idx in range(6):
            setattr(self, 'res{}'.format(idx), ResBlock(base * 2, base * 4))
        self.c2 = L.Convolution1D(base * 2, 2560, 1, 1, 0, initialW=w)
        self.bn2 = L.BatchNormalization(2560)
        # Decoder: deconv then two upsampling blocks back to one channel.
        self.dc3 = L.Deconvolution2D(base * 2, base * 8, (3, 4), (1, 2),
                                     (1, 1), initialW=w)
        self.bb3 = L.BatchNormalization(base * 8)
        self.cbg4 = C2BG(base * 4, base * 8, up=True)
        self.cbg5 = C2BG(base * 4, 72, up=True)
        self.c3 = L.Convolution2D(36, 1, 3, 1, 1, initialW=w)
def __init__(self, base=128):
    """Build the GeneratorWithCIN layer stack.

    Args:
        base (int): base channel width; wider layers scale from it.
    """
    w = initializers.Normal(0.02)
    super(GeneratorWithCIN, self).__init__()
    with self.init_scope():
        # 2D front-end followed by two downsampling blocks.
        self.c0 = L.Convolution2D(1, base, (15, 5), 1, (7, 2), initialW=w)
        self.cbg0 = C2BG(int(base / 2), base * 2, down=True)
        self.cbg1 = C2BG(base, base * 4, down=True)
        # 1x1 conv squeezes the flattened 2D features into a 1D sequence.
        self.c1 = L.Convolution1D(2304, base * 2, 1, 1, 0, initialW=w)
        self.bn1 = L.BatchNormalization(base * 2)
        # Bottleneck: nine residual blocks (res0 .. res8).
        for idx in range(9):
            setattr(self, 'res{}'.format(idx), ResBlock(base * 2, base * 4))
        self.c2 = L.Convolution1D(base * 2, 2304, 1, 1, 0, initialW=w)
        self.bn2 = L.BatchNormalization(2304)
        # Decoder: two upsampling blocks back to a single-channel output.
        self.cbg2 = C2BG(base * 2, base * 8, up=True)
        self.cbg3 = C2BG(base * 4, 72, up=True)
        self.c3 = L.Convolution2D(36, 1, 3, 1, 1, initialW=w)
def __init__(self):
    """Three strided 1D convolutions followed by a dense head.

    Each convolution shrinks the sequence length by a factor of 4;
    input channel counts are inferred lazily (``None``).
    """
    super(TradingModel1D, self).__init__()
    with self.init_scope():
        self.conv1 = L.Convolution1D(None, 64, ksize=4, stride=4)
        self.conv2 = L.Convolution1D(None, 512, ksize=4, stride=4)
        self.conv3 = L.Convolution1D(None, 1920, ksize=4, stride=4)
        self.linear1 = L.Linear(None, 1280)
        self.linear2 = L.Linear(1280, 1280)
        # Scalar output (e.g. a single regression/score value).
        self.linear3 = L.Linear(1280, 1)
def __init__(self, in_ch, out_ch, up=False, down=False):
    """1D conv + batch-norm block.

    Args:
        in_ch (int): input channels.
        out_ch (int): output channels.
        up (bool): flag stored for the forward pass (upsampling path).
        down (bool): flag stored for the forward pass (downsampling path).
    """
    super(C1BG, self).__init__()
    weight_init = initializers.Normal(0.02)
    self.up = up
    self.down = down
    with self.init_scope():
        # Same-length conv used on the upsampling path.
        self.cup = L.Convolution1D(in_ch, out_ch, ksize=3, stride=1, pad=1,
                                   initialW=weight_init)
        # Same-length conv used when neither up nor down is set.
        self.cpara = L.Convolution1D(in_ch, out_ch, ksize=3, stride=1, pad=1,
                                     initialW=weight_init)
        # Stride-2 conv halves the sequence length.
        self.cdown = L.Convolution1D(in_ch, out_ch, ksize=4, stride=2, pad=1,
                                     initialW=weight_init)
        self.bn0 = L.BatchNormalization(out_ch)
def __init__(self, in_ch, out_ch, up=False, down=False):
    """Wide-kernel 1D conv block with batch-norm and a GLU gate.

    Args:
        in_ch (int): input channels.
        out_ch (int): output channels.
        up (bool): flag stored for the forward pass (upsampling path).
        down (bool): flag stored for the forward pass (downsampling path).
    """
    super(CIGLU, self).__init__()
    weight_init = initializers.Normal(0.02)
    self.up = up
    self.down = down
    with self.init_scope():
        # Same-length wide conv (kernel 59) for the upsampling path.
        self.cup = L.Convolution1D(in_ch, out_ch, ksize=59, stride=1,
                                   pad=29, initialW=weight_init)
        # Stride-2 wide conv halves the sequence length.
        self.cdown = L.Convolution1D(in_ch, out_ch, ksize=60, stride=2,
                                     pad=29, initialW=weight_init)
        # Same-length wide conv for the plain path.
        self.cpara = L.Convolution1D(in_ch, out_ch, ksize=59, stride=1,
                                     pad=29, initialW=weight_init)
        self.bn0 = L.BatchNormalization(out_ch)
        self.glu0 = GLU()
def __init__(self, in_ch, out_ch, adv_type='sat', up=False, down=False):
    """1D conv block normalized with conditional instance normalization.

    Args:
        in_ch (int): input channels.
        out_ch (int): output channels.
        adv_type (str): condition identifier forwarded to the
            ConditionalInstanceNormalization layer.
        up (bool): flag stored for the forward pass (upsampling path).
        down (bool): flag stored for the forward pass (downsampling path).
    """
    super(C1BG, self).__init__()
    weight_init = initializers.GlorotUniform()
    self.up = up
    self.down = down
    with self.init_scope():
        self.cup = L.Convolution1D(in_ch, out_ch, ksize=3, stride=1, pad=1,
                                   initialW=weight_init)
        self.cpara = L.Convolution1D(in_ch, out_ch, ksize=3, stride=1, pad=1,
                                     initialW=weight_init)
        # Stride-2 conv halves the sequence length.
        self.cdown = L.Convolution1D(in_ch, out_ch, ksize=4, stride=2, pad=1,
                                     initialW=weight_init)
        # Conditional instance norm replaces plain batch norm here.
        self.cin0 = ConditionalInstanceNormalization(out_ch, adv_type)
def __init__(self, n_in, n_hid, n_out, do_prob=0.):
    """Two-layer 1D CNN with prediction and attention heads.

    Args:
        n_in (int): input channel count.
        n_hid (int): hidden channel count.
        n_out (int): output channels of the prediction head.
        do_prob (float): dropout probability used at apply time.
    """
    super(CNN, self).__init__()
    self.dropout_prob = do_prob
    weight_init = chainer.initializers.GlorotNormal()
    with self.init_scope():
        self.conv1 = L.Convolution1D(n_in, n_hid, ksize=5, stride=1, pad=0,
                                     initialW=weight_init, initial_bias=0.1)
        self.bn1 = L.BatchNormalization(n_hid)
        self.conv2 = L.Convolution1D(n_hid, n_hid, ksize=5, stride=1, pad=0,
                                     initialW=weight_init, initial_bias=0.1)
        self.bn2 = L.BatchNormalization(n_hid)
        # 1x1 heads: per-position prediction and a scalar attention score.
        self.conv_predict = L.Convolution1D(n_hid, n_out, ksize=1,
                                            initialW=weight_init,
                                            initial_bias=0.1)
        self.conv_attention = L.Convolution1D(n_hid, 1, ksize=1,
                                              initialW=weight_init,
                                              initial_bias=0.1)
def __init__(self, in_ch, out_ch):
    """Residual block: a C1BG sub-block plus a conv + batch-norm branch.

    Args:
        in_ch (int): input (and branch output) channels.
        out_ch (int): output channels of the C1BG sub-block.
    """
    super(ResBlock, self).__init__()
    weight_init = initializers.Normal(0.02)
    with self.init_scope():
        self.cbg0 = C1BG(in_ch, out_ch)
        # Channel-preserving conv on the second half of the block.
        self.c0 = L.Convolution1D(in_ch, in_ch, ksize=3, stride=1, pad=1,
                                  initialW=weight_init)
        self.bn0 = L.BatchNormalization(in_ch)
def __init__(self, in_channels, out_channels, ksize, stride, pad,
             dilate=1, groups=1, use_bias=False, use_bn=True, bn_eps=1e-5,
             activation=(lambda: F.relu), dropout_rate=0.0, **kwargs):
    """Configurable 1D convolution block: conv [+ BN] [+ activation] [+ dropout].

    Args:
        in_channels (int): input channels.
        out_channels (int): output channels.
        ksize: kernel size of the convolution.
        stride: stride of the convolution.
        pad: padding of the convolution.
        dilate (int): dilation factor.
        groups (int): number of convolution groups.
        use_bias (bool): whether the conv keeps a bias vector.
        use_bn (bool): whether to append batch normalization.
        bn_eps (float): epsilon for batch normalization.
        activation (callable or None): zero-arg factory returning the
            activation; ``None`` disables activation.
        dropout_rate (float): dropout ratio; 0.0 disables dropout.
    """
    super(ConvBlock1d, self).__init__(**kwargs)
    self.activate = activation is not None
    self.use_bn = use_bn
    self.use_dropout = dropout_rate != 0.0
    with self.init_scope():
        self.conv = L.Convolution1D(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            nobias=not use_bias,
            dilate=dilate,
            groups=groups)
        if self.use_bn:
            self.bn = L.BatchNormalization(size=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = activation()
        if self.use_dropout:
            # Bind the ratio now; forward just calls self.dropout(x).
            self.dropout = partial(F.dropout, ratio=dropout_rate)
def conv1d1(in_channels, out_channels, stride=1, groups=1, use_bias=False,
            **kwargs):
    """Create a 1x1 (pointwise) 1D convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int, default 1
        Stride of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    layer = L.Convolution1D(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=1,
        stride=stride,
        nobias=not use_bias,
        groups=groups,
        **kwargs)
    return layer
def __init__(self, in_ch, out_ch, adv_type='sat'):
    """Residual block using conditional instance normalization.

    Args:
        in_ch (int): input (and branch output) channels.
        out_ch (int): output channels of the C1BG sub-block.
        adv_type (str): condition identifier forwarded to the
            normalization layers.
    """
    super(ResBlock, self).__init__()
    weight_init = initializers.GlorotUniform()
    with self.init_scope():
        self.cbg0 = C1BG(in_ch, out_ch, adv_type)
        # Channel-preserving conv on the second half of the block.
        self.c0 = L.Convolution1D(in_ch, in_ch, ksize=3, stride=1, pad=1,
                                  initialW=weight_init)
        # Conditional instance norm in place of plain batch norm.
        self.bn0 = ConditionalInstanceNormalization(in_ch, adv_type)
def __init__(self, filter_size, dilation, residual_channels,
             dilated_channels, skip_channels):
    """Dilated-convolution residual block (WaveNet style).

    Args:
        filter_size (int): kernel size of the dilated conv.
        dilation (int): dilation factor of the dilated conv.
        residual_channels (int): channels on the residual path.
        dilated_channels (int): channels produced by the dilated conv;
            the 1x1 res/skip convs consume half of them.
        skip_channels (int): channels on the skip path.
    """
    super(ResidualBlock, self).__init__()
    with self.init_scope():
        # pad = dilation * (filter_size - 1) keeps the full receptive
        # field available on the left of each position.
        self.conv = L.Convolution1D(
            residual_channels, dilated_channels,
            ksize=filter_size,
            pad=dilation * (filter_size - 1),
            dilate=dilation)
        self.res = L.Convolution1D(dilated_channels // 2,
                                   residual_channels, 1)
        self.skip = L.Convolution1D(dilated_channels // 2,
                                    skip_channels, 1)
    self.filter_size = filter_size
    self.dilation = dilation
    self.residual_channels = residual_channels
def __init__(self, base=32):
    """Build the 1D Generator: wide conv, down/up CIGLU blocks, res stack.

    Args:
        base (int): base channel width; wider layers scale from it.
    """
    super(Generator, self).__init__()
    weight_init = initializers.Normal(0.02)
    with self.init_scope():
        # Wide-kernel input conv followed by a GLU gate.
        self.c0 = L.Convolution1D(1, base, 59, 1, 29, initialW=weight_init)
        self.glu0 = GLU()
        # Two downsampling CIGLU blocks.
        self.ciglu0 = CIGLU(int(base / 2), base * 2, down=True)
        self.ciglu1 = CIGLU(base, base * 2, down=True)
        # Bottleneck: six residual blocks (res0 .. res5).
        for idx in range(6):
            setattr(self, 'res{}'.format(idx), ResBlock(base, base * 2))
        # Two upsampling CIGLU blocks, then a wide conv back to 1 channel.
        self.ciglu2 = CIGLU(base, base * 2, up=True)
        self.ciglu3 = CIGLU(base, base * 2, up=True)
        self.c1 = L.Convolution1D(base, 1, 59, 1, 29, initialW=weight_init)
def __init__(self, base=32):
    """Build the 1D Discriminator: wide conv, GLU, CIGLU blocks, linear head.

    Bug fix: ``w`` was previously ``initializers.Normal`` (the class
    itself, never instantiated). Chainer fills weights by calling
    ``initializer(array)``, so passing the class constructed a throwaway
    ``Normal`` object instead of initializing the weights. It is now
    instantiated with scale 0.02, matching the sibling Generator.

    Args:
        base (int): base channel width; wider layers scale from it.
    """
    super(Discriminator, self).__init__()
    w = initializers.Normal(0.02)  # was `initializers.Normal` (uncalled)
    with self.init_scope():
        # Wide-kernel input conv; now also uses the shared initializer
        # for consistency with the Generator's c0.
        self.c0 = L.Convolution1D(1, base * 2, 59, 1, 29, initialW=w)
        self.glu = GLU()
        self.ciglu0 = CIGLU(base, base * 2)
        self.ciglu1 = CIGLU(base, base * 2)
        # Scalar real/fake score; input size inferred lazily.
        self.l0 = L.Linear(None, 1, initialW=w)
def __init__(self, base=128):
    """Build the ContentEncoder: 2D front-end, 1D bottleneck, res stack.

    Args:
        base (int): base channel width; wider layers scale from it.
    """
    weight_init = initializers.Normal(0.02)
    super(ContentEncoder, self).__init__()
    with self.init_scope():
        # 2D front-end followed by two downsampling blocks.
        self.c0 = L.Convolution2D(1, base, (15, 5), 1, (7, 2),
                                  initialW=weight_init)
        self.cbg0 = C2BG(int(base / 2), base * 2, down=True)
        self.cbg1 = C2BG(base, base * 4, down=True)
        # 1x1 conv squeezes the flattened 2D features into a 1D sequence.
        self.c1 = L.Convolution1D(2304, base * 2, 1, 1, 0,
                                  initialW=weight_init)
        self.bn1 = L.BatchNormalization(base * 2)
        # Bottleneck: four residual blocks (res0 .. res3).
        for idx in range(4):
            setattr(self, 'res{}'.format(idx), ResBlock(base * 2, base * 4))
        self.c2 = L.Convolution1D(base * 2, 2304, 1, 1, 0,
                                  initialW=weight_init)
        self.bn2 = L.BatchNormalization(2304)
def __init__(self, in_channels, out_channels, mode='none',
             activation=F.leaky_relu, bn=True, dr=None):
    """Conv block whose spatial behavior is selected by ``mode``.

    Args:
        in_channels (int): input channels.
        out_channels (int): output channels.
        mode (str): 'none' keeps length, 'down' halves it (stride-2
            conv), 'up' doubles it (stride-2 deconv).
        activation (callable): activation applied in the forward pass.
        bn (bool): add batch norm (and drop the conv bias).
        dr: dropout ratio stored for the forward pass.

    Raises:
        Exception: if ``mode`` is not one of 'none', 'down', 'up'.
    """
    super(ConvBlock, self).__init__()
    init_w = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.bn = bn
    self.dr = dr
    # mode -> (layer class, kernel size, stride); pad is 1 in all cases.
    conv_cfg = {
        'none': (L.Convolution1D, 3, 1),
        'down': (L.Convolution1D, 4, 2),
        'up': (L.Deconvolution1D, 4, 2),
    }
    if mode not in conv_cfg:
        raise Exception('mode is missing')
    layer_cls, k, s = conv_cfg[mode]
    with self.init_scope():
        self.c = layer_cls(in_channels, out_channels, ksize=k, stride=s,
                           pad=1, initialW=init_w, nobias=bn)
        if bn:
            self.b = L.BatchNormalization(out_channels)
def __init__(self, chs=None):
    """Build the discriminator.

    Parameters
    ----------
    chs : list of int
        Output channel count of each intermediate layer.
    """
    super(Discriminator, self).__init__()
    chs = [512, 256, 128] if chs is None else chs
    with self.init_scope():
        he_init = chainer.initializers.HeNormal()
        # Spectrally-normalized downsampling stack; comments track the
        # expected feature shape after each layer.
        # (N, 1025, 200) -> (N, chs[0], 100)
        self.c_0 = L.Convolution1D(1025, chs[0], 6, stride=2, pad=2,
                                   initialW=he_init).add_hook(spn())
        # -> (N, chs[1], 50)
        self.c_1 = L.Convolution1D(chs[0], chs[1], 6, stride=2, pad=2,
                                   initialW=he_init).add_hook(spn())
        # -> (N, chs[2], 9)
        self.c_2 = L.Convolution1D(chs[1], chs[2], 10, stride=5, pad=0,
                                   initialW=he_init).add_hook(spn())
        # Final conv (no spectral norm) to 128 channels.
        self.c_3 = L.Convolution1D(chs[2], 128, 9, pad=4, initialW=he_init)
def __init__(self, out_layers, r_channels, channels=None,
             upscale_factors=None):
    """Upsampling network: a stack of deconvolutions plus 1x1 output convs.

    Fix: the defaults were mutable list literals (``[128, 128]`` /
    ``[16, 16]``), which are shared across every call; replaced with
    ``None`` sentinels resolved inside the body (same effective values).

    Args:
        out_layers (int): number of 1x1 output convolution links.
        r_channels (int): half the channel count of each output conv.
        channels (list of int): output channels of each deconvolution;
            defaults to [128, 128].
        upscale_factors (list of int): per-deconvolution upscale factor
            (used as both kernel size and stride); defaults to [16, 16].
    """
    super(UpsampleNet, self).__init__()
    if channels is None:
        channels = [128, 128]
    if upscale_factors is None:
        upscale_factors = [16, 16]
    # NOTE(review): zip truncates to the shorter list, while
    # n_deconvolutions uses len(channels) — confirm the two lists are
    # always the same length.
    for channel, factor in zip(channels, upscale_factors):
        self.add_link(
            L.Deconvolution1D(None, channel, factor, stride=factor, pad=0))
    for _ in range(out_layers):
        self.add_link(L.Convolution1D(None, 2 * r_channels, 1))
    self.n_deconvolutions = len(channels)
def __init__(self, n_in, n_hid, n_out, do_prob=0.):
    """Two-layer 1D CNN with prediction and attention heads.

    Fixes:
    - ``conv2`` used PyTorch-style keywords (``kernel_size``/``padding``),
      which ``chainer.links.Convolution1D`` rejects with a TypeError;
      Chainer's names are ``ksize``/``pad``.
    - Links are now created under ``init_scope()`` so their parameters
      are registered with the chain (trained and serialized) — matching
      the sibling CNN definition in this file.

    Args:
        n_in (int): input channel count.
        n_hid (int): hidden channel count.
        n_out (int): output channels of the prediction head.
        do_prob (float): dropout probability used at apply time.
    """
    super(CNN, self).__init__()
    w = chainer.initializers.LeCunNormal()
    with self.init_scope():
        self.conv1 = L.Convolution1D(n_in, n_hid, ksize=5, stride=1,
                                     pad=0, initialW=w)
        self.bn1 = L.BatchNormalization(n_hid)
        # Was: kernel_size=5, stride=1, padding=0 (PyTorch kwargs).
        self.conv2 = L.Convolution1D(n_hid, n_hid, ksize=5, stride=1,
                                     pad=0, initialW=w)
        self.bn2 = L.BatchNormalization(n_hid)
        # 1x1 heads: per-position prediction and a scalar attention score.
        self.conv_predict = L.Convolution1D(n_hid, n_out, ksize=1,
                                            initialW=w)
        self.conv_attention = L.Convolution1D(n_hid, 1, ksize=1,
                                              initialW=w)
    self.dropout_prob = do_prob
def __init__(self, in_ch, out_ch, up=False, down=False, depthwise=False,
             activation=F.relu):
    """Conv + batch-norm + activation block for 1D features.

    Args:
        in_ch (int): input channels.
        out_ch (int): output channels.
        up (bool): flag stored for the forward pass (upsampling path).
        down (bool): flag stored for the forward pass (downsampling path).
        depthwise (bool): flag stored for the forward pass (1x1 conv path).
        activation (callable): activation applied in the forward pass.
    """
    super(CBR_1D, self).__init__()
    weight_init = initializers.Normal(0.01)
    self.up = up
    self.down = down
    self.depthwise = depthwise
    self.activation = activation
    with self.init_scope():
        # Same-length conv for the plain path.
        self.cpara = L.ConvolutionND(1, in_ch, out_ch, 3, 1, 1,
                                     initialW=weight_init)
        # Stride-2 conv halves the sequence length.
        self.cdown = L.ConvolutionND(1, in_ch, out_ch, 4, 2, 1,
                                     initialW=weight_init)
        # Pointwise (1x1) conv for the depthwise path.
        self.cdw = L.Convolution1D(in_ch, out_ch, 1, 1, 0,
                                   initialW=weight_init)
        self.bn0 = L.BatchNormalization(out_ch)
def __init__(self, out_channel, n_condition, n_layers, n_channel):
    """Build a weight-normalized WaveNet-style stack of dilated convs.

    Args:
        out_channel (int): channels of the final output conv; the input
            conv consumes ``out_channel // 2`` channels.
        n_condition (int): channels of the conditioning input.
        n_layers (int): number of dilated layers; layer ``i`` uses
            dilation ``2 ** i``.
        n_channel (int): hidden channel count of the stack.
    """
    super(WaveNet, self).__init__()
    dilated = chainer.ChainList()
    residual = chainer.ChainList()
    skip = chainer.ChainList()
    condition = chainer.ChainList()
    for layer in range(n_layers):
        d = 2 ** layer  # dilation doubles at every layer
        dilated.add_link(weight_norm(
            L.Convolution1D(n_channel, 2 * n_channel, 3, pad=d, dilate=d)))
        residual.add_link(
            weight_norm(L.Convolution1D(n_channel, n_channel, 1)))
        skip.add_link(
            weight_norm(L.Convolution1D(n_channel, n_channel, 1)))
        condition.add_link(
            weight_norm(L.Convolution1D(n_condition, 2 * n_channel, 1)))
    with self.init_scope():
        self.input_conv = weight_norm(
            L.Convolution1D(out_channel // 2, n_channel, 1))
        self.dilated_convs = dilated
        self.residual_convs = residual
        self.skip_convs = skip
        self.condition_convs = condition
        # Zero-initialized weights make the initial output all zeros.
        self.output_conv = L.Convolution1D(
            n_channel, out_channel, 1,
            initialW=chainer.initializers.Zero())
def __init__(self, in_channels, out_channels, ksize=3, pad=1,
             activation=F.relu, mode='none', bn=True, dr=None):
    """1D residual block with optional learnable shortcut and resampling.

    Bug fix: the learnable shortcut ``c_sc`` was created as a
    ``Convolution2D`` although the main path (``c1``/``c2``) is 1D; a 2D
    conv cannot be applied to the (N, C, L) tensors this block processes,
    so the shortcut branch failed whenever ``in_channels != out_channels``.
    It is now a ``Convolution1D`` (same 1x1 / pad-0 configuration).

    Args:
        in_channels (int): input channels.
        out_channels (int): output channels.
        ksize (int): kernel size of the main-path convs.
        pad (int): padding of the main-path convs.
        activation (callable): activation applied in the forward pass.
        mode (str): 'down' / 'up' select a resampling helper; anything
            else disables resampling.
        bn (bool): add batch norm (and drop the conv biases).
        dr: dropout ratio stored for the forward pass.
    """
    super(ResBlock, self).__init__()
    initializer = chainer.initializers.GlorotUniform()
    initializer_sc = chainer.initializers.GlorotUniform()
    self.activation = activation
    self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
    # Shortcut needs its own conv only when channel counts differ.
    self.learnable_sc = in_channels != out_channels
    self.dr = dr
    self.bn = bn
    with self.init_scope():
        self.c1 = L.Convolution1D(in_channels, out_channels, ksize=ksize,
                                  pad=pad, initialW=initializer, nobias=bn)
        self.c2 = L.Convolution1D(out_channels, out_channels, ksize=ksize,
                                  pad=pad, initialW=initializer, nobias=bn)
        if bn:
            self.b1 = L.BatchNormalization(out_channels)
            self.b2 = L.BatchNormalization(out_channels)
        if self.learnable_sc:
            # Was L.Convolution2D — mismatched with the 1D main path.
            self.c_sc = L.Convolution1D(in_channels, out_channels, ksize=1,
                                        pad=0, initialW=initializer_sc)
def __init__(self, n_loop, n_layer, a_channels, r_channels, s_channels,
             use_embed_tanh):
    """Build the WaveNet: embedding conv, residual net, two projections.

    Args:
        n_loop (int): number of dilation loops in the residual net.
        n_layer (int): layers per loop in the residual net.
        a_channels (int): audio (input/output) channel count.
        r_channels (int): residual channel count.
        s_channels (int): skip channel count.
        use_embed_tanh (bool): flag stored for the forward pass
            (tanh after the embedding conv).
    """
    super(WaveNet, self).__init__()
    with self.init_scope():
        self.embed = L.Convolution1D(a_channels, r_channels, 2, pad=1,
                                     nobias=True)
        self.resnet = ResidualNet(n_loop, n_layer, 2, r_channels,
                                  2 * r_channels, s_channels)
        # Two 1x1 projections from skip channels back to audio channels.
        self.proj1 = L.Convolution1D(s_channels, s_channels, 1, nobias=True)
        self.proj2 = L.Convolution1D(s_channels, a_channels, 1, nobias=True)
    self.a_channels = a_channels
    self.s_channels = s_channels
    self.use_embed_tanh = use_embed_tanh
def __init__(self, config):
    """Hierarchical 1D conv encoder configured from a dict.

    Args:
        config (dict): expects keys 'n_words', 'word_emb_dim',
            'dpout_word', 'enc_dim', 'dpout_enc', 'glove'.
    """
    super(HConvNet, self).__init__()
    self.n_words = config['n_words']
    self.word_emb_dim = config['word_emb_dim']
    self.dpout_word = config['dpout_word']
    self.enc_dim = config['enc_dim']
    self.dpout_enc = config['dpout_enc']
    self.glove = config['glove']
    # Each conv layer outputs a quarter of the final encoding dim.
    self.out_units = self.enc_dim // 4
    with self.init_scope():
        # conv1 maps embeddings to out_units; conv2..conv4 are identical
        # length-preserving convs over out_units channels.
        self.conv1 = L.Convolution1D(self.word_emb_dim, self.out_units,
                                     ksize=3, stride=1, pad=1, nobias=True)
        for idx in (2, 3, 4):
            setattr(self, 'conv%d' % idx,
                    L.Convolution1D(self.out_units, self.out_units,
                                    ksize=3, stride=1, pad=1, nobias=True))
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
             initialW=None, initial_bias=None):
    """Convolution1D + BatchNormalization pair.

    NOTE(review): the conv is constructed with ``nobias=True``
    (positional ``True`` in the original), so the ``initial_bias``
    argument is accepted but has no effect — confirm whether a conv
    bias was intended.

    Args:
        in_channels: input channels.
        out_channels: output channels.
        ksize: kernel size.
        stride: stride.
        pad: padding.
        initialW: weight initializer.
        initial_bias: bias initializer (currently unused, see note).
    """
    # Flag toggled externally to control BN statistics updates.
    self.finetune = False
    super().__init__()
    with self.init_scope():
        self.conv = L.Convolution1D(in_channels, out_channels, ksize,
                                    stride, pad, nobias=True,
                                    initialW=initialW,
                                    initial_bias=initial_bias)
        self.bn = L.BatchNormalization(out_channels)
def __init__(self):
    """Pointwise-conv feature extractor over RGB and point-cloud streams."""
    super().__init__()
    with self.init_scope():
        # Stage 1: lift each modality with 1x1 convs (32/3 -> 64).
        self.conv1_rgb = L.Convolution1D(32, 64, 1)
        self.conv1_pcd = L.Convolution1D(3, 64, 1)
        # Stage 2: 64 -> 128 per modality.
        self.conv2_rgb = L.Convolution1D(64, 128, 1)
        self.conv2_pcd = L.Convolution1D(64, 128, 1)
        # Stages 3-4: 256-channel input (presumably the two 128-channel
        # streams concatenated — confirm in the forward pass).
        self.conv3 = L.Convolution1D(256, 512, 1)
        self.conv4 = L.Convolution1D(512, 1024, 1)
def __init__(self, n_point):
    """Pointwise-conv extractor over image and point-cloud streams.

    Args:
        n_point (int): number of points, stored for the forward pass.
    """
    super(PoseNetExtractor, self).__init__()
    with self.init_scope():
        # Stage 1: lift each modality with 1x1 convs (32/3 -> 64).
        self.conv1_img = L.Convolution1D(32, 64, 1)
        self.conv1_pcd = L.Convolution1D(3, 64, 1)
        # Stage 2: 64 -> 128 per modality.
        self.conv2_img = L.Convolution1D(64, 128, 1)
        self.conv2_pcd = L.Convolution1D(64, 128, 1)
        # Stages 3-4: 256-channel input (presumably the two 128-channel
        # streams concatenated — confirm in the forward pass).
        self.conv3 = L.Convolution1D(256, 512, 1)
        self.conv4 = L.Convolution1D(512, 1024, 1)
    self.n_point = n_point
def __init__(self, imsize, action_size, L_stages, conditional):
    """Build the model: a 2D image encoder, a value head, and a 1D branch.

    Args:
        imsize: input image size, stored for later use.
        action_size (int): output channels of the final 1D conv.
        L_stages (int): contributes ``L_stages + 12`` channels to the
            encoder's last stage.
        conditional (bool): when True the encoder takes 6 input channels
            (two stacked RGB images), otherwise 3.
    """
    self.imsize = imsize
    self.action_size = action_size
    self.f = F.relu  # activation used by the encoding part
    self.L_stages = L_stages
    self.conditional = conditional
    super().__init__()
    with self.init_scope():
        in_channel = 6 if self.conditional else 3
        # --- 2D encoder (c1..c6) with matching batch norms ---
        self.c1 = L.Convolution2D(in_channel, 16, ksize=3, stride=1, pad=1)
        self.c2 = L.Convolution2D(16, 32, ksize=3, stride=2, pad=1)
        self.c3 = L.Convolution2D(32, 48, ksize=2, stride=2, pad=1)
        self.c4 = L.Convolution2D(48, 48, ksize=2, stride=2, pad=1)
        self.c5 = L.Convolution2D(48, 64, ksize=2, stride=2, pad=1)
        self.c6 = L.Convolution2D(64, self.L_stages + 12, ksize=2,
                                  stride=2, pad=1)
        # bn1..bn5 normalize the outputs of c2..c6 respectively.
        self.bn1 = L.BatchNormalization(32)
        self.bn2 = L.BatchNormalization(48)
        self.bn3 = L.BatchNormalization(48)
        self.bn4 = L.BatchNormalization(64)
        self.bn5 = L.BatchNormalization(self.L_stages + 12)
        # Value head over the flattened encoder output.
        self.v = v_function.FCVFunction(3 * 3 * (self.L_stages + 12))
        # --- 1D branch (dc1..dc6) with matching batch norms ---
        self.dc1 = L.Convolution1D(1, 16, ksize=3, stride=1)
        self.dc2 = L.Convolution1D(16, 32, ksize=3, stride=1)
        self.dc3 = L.Convolution1D(32, 48, ksize=3, stride=1)
        self.dc4 = L.Convolution1D(48, 48, ksize=3, stride=1)
        self.dc5 = L.Convolution1D(48, 64, ksize=3, stride=1)
        self.dc6 = L.Convolution1D(64, action_size, ksize=3, stride=1)
        # dbn1..dbn5 normalize the outputs of dc1..dc5 respectively.
        self.dbn1 = L.BatchNormalization(16)
        self.dbn2 = L.BatchNormalization(32)
        self.dbn3 = L.BatchNormalization(48)
        self.dbn4 = L.BatchNormalization(48)
        self.dbn5 = L.BatchNormalization(64)
def __init__(self, pretrained_model=None, n_fg_class=21, n_point=1000,
             mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Build PoseNet: extractors plus rot / trans / conf conv heads.

    Args:
        pretrained_model: identifier of pretrained weights to load, or
            None to skip loading.
        n_fg_class (int): number of foreground classes.
        n_point (int): number of points fed to the pose extractor.
        mean: per-channel normalization mean.
        std: per-channel normalization std.
    """
    super(PoseNet, self).__init__()
    param, path = utils.prepare_pretrained_model(
        {'n_fg_class': n_fg_class}, pretrained_model, self._models)
    self.n_fg_class = param['n_fg_class']
    self.n_point = n_point
    # Broadcastable (C, 1, 1) normalization constants.
    self.mean = np.array(mean, dtype=np.float32)[:, None, None]
    self.std = np.array(std, dtype=np.float32)[:, None, None]
    with self.init_scope():
        # extractors
        self.resnet_extractor = ResNet18Extractor()
        self.pspnet_extractor = PSPNetExtractor()
        self.posenet_extractor = PoseNetExtractor(self.n_point)
        # Three parallel 1x1-conv heads (1408 -> 640 -> 256 -> 128 -> out).
        # Final per-class outputs: rot 4 values (presumably a
        # quaternion), trans 3, conf 1.
        final_mult = {'rot': 4, 'trans': 3, 'conf': 1}
        for head in ('rot', 'trans', 'conf'):
            setattr(self, 'conv1_%s' % head, L.Convolution1D(1408, 640, 1))
            setattr(self, 'conv2_%s' % head, L.Convolution1D(640, 256, 1))
            setattr(self, 'conv3_%s' % head, L.Convolution1D(256, 128, 1))
            setattr(self, 'conv4_%s' % head,
                    L.Convolution1D(
                        128, self.n_fg_class * final_mult[head], 1))
    if pretrained_model is not None:
        chainer.serializers.load_npz(path, self)
def __init__(
    self,
    *,
    n_fg_class,
    centerize_pcd=True,
    pretrained_resnet18=False,
    loss=None,
):
    """Build the network: extractors plus rot / trans / conf conv heads.

    Args:
        n_fg_class (int): number of foreground classes.
        centerize_pcd (bool): flag stored for later use.
        pretrained_resnet18 (bool): use the pretrained ResNet18 extractor
            instead of the dense-fusion variant.
        loss (str or None): loss name; defaults to "add/add_s".
    """
    super().__init__()
    self._n_fg_class = n_fg_class
    self._centerize_pcd = centerize_pcd
    if loss is None:
        loss = "add/add_s"
    assert loss in [
        "add",
        "add/add_s",
    ]
    self._loss = loss
    with self.init_scope():
        # extractor selection
        if pretrained_resnet18:
            self.resnet_extractor = morefusion.models.ResNet18Extractor()
        else:
            self.resnet_extractor = (
                morefusion.models.dense_fusion.ResNet18()
            )
        self.pspnet_extractor = (
            morefusion.models.dense_fusion.PSPNetExtractor()
        )
        self.posenet_extractor = PoseNetExtractor()
        # Three parallel 1x1-conv heads (1408 -> 640 -> 256 -> 128 -> out).
        # Final per-class outputs: rot 4 values (presumably a
        # quaternion), trans 3, conf 1.
        final_mult = {"rot": 4, "trans": 3, "conf": 1}
        for head in ("rot", "trans", "conf"):
            setattr(self, "conv1_%s" % head, L.Convolution1D(1408, 640, 1))
            setattr(self, "conv2_%s" % head, L.Convolution1D(640, 256, 1))
            setattr(self, "conv3_%s" % head, L.Convolution1D(256, 128, 1))
            setattr(
                self,
                "conv4_%s" % head,
                L.Convolution1D(128, n_fg_class * final_mult[head], 1),
            )