Code example #1
File: rnn_cells.py Project: strategist922/knmt
def create_initializer(init_type, scale=None, fillvalue=None):
    if init_type == 'identity':
        return initializers.Identity() if scale is None else initializers.Identity(scale=scale)
    if init_type == 'constant':
        return initializers.Constant(fillvalue)
    if init_type == 'zero':
        return initializers.Zero()
    if init_type == 'one':
        return initializers.One()
    if init_type == 'normal':
        return initializers.Normal() if scale is None else initializers.Normal(scale)
    if init_type == 'glorotNormal':
        return initializers.GlorotNormal() if scale is None else initializers.GlorotNormal(scale)
    if init_type == 'heNormal':
        return initializers.HeNormal() if scale is None else initializers.HeNormal(scale)
    if init_type == 'orthogonal':
        return initializers.Orthogonal() if scale is None else initializers.Orthogonal(scale)
    if init_type == 'uniform':
        return initializers.Uniform() if scale is None else initializers.Uniform(scale)
    if init_type == 'leCunUniform':
        return initializers.LeCunUniform() if scale is None else initializers.LeCunUniform(scale)
    if init_type == 'glorotUniform':
        return initializers.GlorotUniform() if scale is None else initializers.GlorotUniform(scale)
    if init_type == 'heUniform':
        return initializers.HeUniform() if scale is None else initializers.HeUniform(scale)
    raise ValueError("Unknown initializer type: {0}".format(init_type))
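
For reference, a minimal usage sketch (not part of the original project; layer sizes are hypothetical, and it assumes `chainer` is installed and `from chainer import initializers` is in scope for the function above):

import chainer.links as L

# Pick an initializer by name and hand it to a link.
init = create_initializer('glorotUniform')   # default-scaled GlorotUniform
layer = L.Linear(128, 64, initialW=init)     # weights are drawn when the link is initialized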
Code example #2
 def __init__(self, size, ff_size=2048, dropout_ratio=0.1):
     super().__init__()
     self.dropout_ratio = dropout_ratio
     with self.init_scope():
         self.l1 = L.Linear(size,
                            ff_size,
                            initialW=initializers.GlorotUniform())
         self.l2 = L.Linear(ff_size,
                            size,
                            initialW=initializers.GlorotUniform())
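
The excerpt only shows the constructor of this position-wise feed-forward block. Below is a plausible forward pass, sketched as an assumption (the original project may use a different activation); it assumes `import chainer.functions as F`:

    # Assumed forward pass for the block above; not part of the excerpt.
    def forward(self, x):
        h = F.relu(self.l1(x))                      # expand: size -> ff_size (ReLU is an assumption)
        h = F.dropout(h, ratio=self.dropout_ratio)  # regularize
        return self.l2(h)                           # project back: ff_size -> size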
Code example #3
 def __init__(self, input_size, n_units, dropout):
     super(ListNet, self).__init__(l1=L.Linear(input_size,
                                               n_units,
                                               initialW=I.GlorotUniform()),
                                   l2=L.Linear(n_units,
                                               n_units,
                                               initialW=I.GlorotUniform()),
                                   l3=L.Linear(n_units,
                                               1,
                                               initialW=I.GlorotUniform(),
                                               nobias=True))
     self.add_persistent("_dropout", dropout)
Code example #4
    def __init__(self, in_ch, out_ch, activation=F.relu):
        super(OptimizedBlock, self).__init__()
        w = initializers.GlorotUniform(math.sqrt(2))
        w_sc = initializers.GlorotUniform()
        self.activation = activation
        with self.init_scope():
            self.c1 = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.c2 = L.Convolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)
            self.c_sc = L.Convolution2D(in_ch, out_ch, 1, 1, 0, initialW=w_sc)

            self.bn1 = L.BatchNormalization(out_ch)
            self.bn2 = L.BatchNormalization(out_ch)
            self.bn_sc = L.BatchNormalization(out_ch)
Code example #5
    def __init__(self, in_ch, out_ch, down=False, activation=F.relu):
        super(Dis_ResBlock, self).__init__()

        w = initializers.GlorotUniform(math.sqrt(2))
        w_sc = initializers.GlorotUniform()
        self.down = down
        self.activation = activation

        self.learnable_sc = (in_ch != out_ch) or down
        with self.init_scope():
            self.c0 = SNConvolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.c1 = SNConvolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)

            if self.learnable_sc:
                self.c_sc = SNConvolution2D(in_ch, out_ch, 1, 1, 0, initialW=w_sc)
Code example #6
 def __init__(self, embedding, out_size=None, window_size=3, dropout=0.5):
     super().__init__()
     if isinstance(embedding, np.ndarray):
         vocab_size, embed_size = embedding.shape
     else:
         vocab_size, embed_size = embedding
         embedding = None
     if out_size is None:
         out_size = embed_size
     self.out_size = out_size
     with self.init_scope():
         self.embed = L.EmbedID(
             in_size=vocab_size,
             out_size=embed_size,
             initialW=embedding,
         )
         self.conv = L.Convolution2D(
             in_channels=1,
             out_channels=out_size,
             ksize=(window_size, embed_size),
             stride=(1, embed_size),
             pad=(int(window_size / 2), 0),
             nobias=True,
             initialW=initializers.GlorotUniform(),
             initial_bias=0,
         )
     self.dropout = dropout
     self._desc = {
         'embedding': (vocab_size, embed_size),
         'out_size': out_size,
         'window_size': window_size,
         'dropout': dropout,
     }
Code example #7
    def __init__(self, base=64, activation=F.relu):
        super(Generator, self).__init__()

        self.activation = activation
        w = initializers.GlorotUniform()

        with self.init_scope():
            self.l0_body = L.Linear(256, 4*3*base*8, initialW=w)
            self.res1_body = Gen_ResBlock(base*16, base*4, activation=activation, up=True)
            self.res2_body = Gen_ResBlock(base*8, base*4, activation=activation, up=True)
            self.res3_body = Gen_ResBlock(base*8, base*4, activation=activation, up=True)
            self.res4_body = Gen_ResBlock(base*8, base*2, activation=activation, up=True)
            self.res5_body = Gen_ResBlock(base*4, base, activation=activation, up=True)
            self.bn0_body = L.BatchNormalization(base*2)

            self.l0_seg = L.Linear(256, 4*3*base*8, initialW=w)
            self.res1_seg = Gen_ResBlock(base*8, base*4, activation=activation, up=True)
            self.res2_seg = Gen_ResBlock(base*4, base*4, activation=activation, up=True)
            self.res3_seg = Gen_ResBlock(base*4, base*4, activation=activation, up=True)
            self.res4_seg = Gen_ResBlock(base*4, base*2, activation=activation, up=True)
            self.res5_seg = Gen_ResBlock(base*2, base, activation=activation, up=True)
            self.bn0_seg = L.BatchNormalization(base)

            self.c0_body = L.Convolution2D(base * 2, 3, 3, 1, 1, initialW=w)
            self.c0_seg = L.Convolution2D(base, 3, 3, 1, 1, initialW=w)
Code example #8
File: model.py Project: binbomb/SuperResolution
    def __init__(self, in_ch, out_ch, k=16):
        w = initializers.GlorotUniform()
        super(ChannelAttention, self).__init__()

        with self.init_scope():
            # use the reduction ratio k and the Glorot initializer defined above
            self.l0 = L.Linear(in_ch, int(in_ch / k), initialW=w)
            self.l1 = L.Linear(int(in_ch / k), in_ch, initialW=w)
Code example #9
    def __init__(self):
        super(two_times_two_in, self).__init__()
        self.W = initializers.GlorotUniform()
        self.b = initializers.Constant(fill_value=0.0)
        with self.init_scope():

            self.conv1 = L.Convolution2D(3,
                                         16,
                                         ksize=7,
                                         stride=1,
                                         initialW=self.W,
                                         initial_bias=self.b)
            self.conv2 = L.Convolution2D(16,
                                         64,
                                         ksize=7,
                                         stride=1,
                                         initialW=self.W,
                                         initial_bias=self.b)
            self.conv3 = L.Convolution2D(64,
                                         256,
                                         ksize=7,
                                         stride=1,
                                         initialW=self.W,
                                         initial_bias=self.b)
            self.fc4 = L.Linear(None, 52, initialW=self.W, initial_bias=self.b)
            self.BN1 = L.BatchNormalization(16)
            self.BN2 = L.BatchNormalization(64)
            self.BN3 = L.BatchNormalization(256)
            self.BN4 = L.BatchNormalization(52)
Code example #10
 def __init__(self, in_ch, out_ch):
     super(SECatResBlock, self).__init__()
     w = initializers.GlorotUniform()
     with self.init_scope():
         self.c0 = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
         self.bn0 = L.BatchNormalization(out_ch)
         self.se = SECat(out_ch, int(out_ch / 16))
Code example #11
File: ssd.py Project: Iflier/chainer-ssd
 def __init__(self, n_class, aspect_ratios):
     init = {
         'initialW': initializers.GlorotUniform(),
         'initial_bias': initializers.constant.Zero(),
     }
     super().__init__(
         base=L.VGG16Layers(pretrained_model=None),
         conv5_1=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv5_2=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv5_3=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv6=L.DilatedConvolution2D(None,
                                      1024,
                                      3,
                                      pad=6,
                                      dilate=6,
                                      **init),
         conv7=L.Convolution2D(None, 1024, 1, **init),
         conv8_1=L.Convolution2D(None, 256, 1, **init),
         conv8_2=L.Convolution2D(None, 512, 3, stride=2, pad=1, **init),
         conv9_1=L.Convolution2D(None, 128, 1, **init),
         conv9_2=L.Convolution2D(None, 256, 3, stride=2, pad=1, **init),
         conv10_1=L.Convolution2D(None, 128, 1, **init),
         conv10_2=L.Convolution2D(None, 256, 3, **init),
         conv11_1=L.Convolution2D(None, 128, 1, **init),
         conv11_2=L.Convolution2D(None, 256, 3, **init),
         multibox=MultiBox(n_class, aspect_ratios=aspect_ratios, init=init),
     )
     self.n_class = n_class
     self.aspect_ratios = aspect_ratios
     self.train = False
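
Note the pattern used here: the `init` dict bundles the weight and bias initializers once and is unpacked into every convolution with `**init`. A standalone illustration of the same idea (layer names and sizes are hypothetical):

from chainer import initializers
import chainer.links as L

init = {
    'initialW': initializers.GlorotUniform(),
    'initial_bias': initializers.Zero(),
}
# Both layers share the same initializer settings without repeating the kwargs.
conv_a = L.Convolution2D(None, 64, 3, pad=1, **init)
conv_b = L.Convolution2D(None, 128, 3, stride=2, pad=1, **init)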
Code example #12
    def __init__(self, base=128, adv_type='sat'):
        w = initializers.GlorotUniform()
        super(GeneratorWithCIN, self).__init__()
        with self.init_scope():
            self.c0 = L.Convolution2D(1, base, (15, 5), 1, (7, 2), initialW=w)
            self.cbg0 = C2BG(int(base/2), base*2, down=True)
            self.cbg1 = C2BG(base, base*4, down=True)

            self.c1 = L.Convolution1D(2304, base*2, 1, 1, 0, initialW=w)
            self.bn1 = L.BatchNormalization(base*2)

            self.res0 = ResBlock(base*2, base*4, adv_type)
            self.res1 = ResBlock(base*2, base*4, adv_type)
            self.res2 = ResBlock(base*2, base*4, adv_type)
            self.res3 = ResBlock(base*2, base*4, adv_type)
            self.res4 = ResBlock(base*2, base*4, adv_type)
            self.res5 = ResBlock(base*2, base*4, adv_type)
            self.res6 = ResBlock(base*2, base*4, adv_type)
            self.res7 = ResBlock(base*2, base*4, adv_type)
            self.res8 = ResBlock(base*2, base*4, adv_type)

            self.c2 = L.Convolution1D(base*2, 2304, 1, 1, 0, initialW=w)
            self.bn2 = L.BatchNormalization(2304)

            self.cbg2 = C2BG(base*2, base*8, up=True)
            self.cbg3 = C2BG(base*4, 72, up=True)

            self.c3 = L.Convolution2D(36, 1, 3, 1, 1, initialW=w)
Code example #13
    def __init__(self, base=64, activation=F.relu):
        super(Generator, self).__init__()

        self.activation = activation
        w = initializers.GlorotUniform()

        with self.init_scope():
            self.l0 = L.Linear(128 + 9, 4 * 4 * base * 8, initialW=w)
            self.res1 = Gen_ResBlock(base * 8,
                                     base * 4,
                                     activation=activation,
                                     up=True)
            self.res2 = Gen_ResBlock(base * 4,
                                     base * 4,
                                     activation=activation,
                                     up=True)
            self.res3 = Gen_ResBlock(base * 4,
                                     base * 4,
                                     activation=activation,
                                     up=True)
            self.res4 = Gen_ResBlock(base * 4,
                                     base * 2,
                                     activation=activation,
                                     up=True)
            self.res5 = Gen_ResBlock(base * 2,
                                     base,
                                     activation=activation,
                                     up=True)
            self.bn0 = L.BatchNormalization(base)
            self.c0 = L.Convolution2D(base, 3, 3, 1, 1, initialW=w)
Code example #14
    def __init__(self, base=64, activation=F.leaky_relu):
        super(Discriminator, self).__init__()
        w = initializers.GlorotUniform()
        self.activation = activation
        with self.init_scope():
            self.c0 = Dis_ResBlock(3, base, activation=activation, down=True)
            self.res0 = Dis_ResBlock(base,
                                     base * 2,
                                     activation=activation,
                                     down=True)
            self.res1 = Dis_ResBlock(base * 2,
                                     base * 4,
                                     activation=activation,
                                     down=True)
            self.res2 = Dis_ResBlock(base * 4,
                                     base * 4,
                                     activation=activation,
                                     down=True)
            self.res3 = Dis_ResBlock(base * 4,
                                     base * 8,
                                     activation=activation,
                                     down=True)
            self.res4 = Dis_ResBlock(base * 8, base * 8, activation=activation)
            self.lembed = L.Linear(None, base * 8, initialW=w)
            self.l1 = L.Linear(None, 1, initialW=w)

            self.bn0 = L.BatchNormalization(base)
Code example #15
File: model.py Project: binbomb/SuperResolution
    def __init__(self, in_ch, out_ch):
        w = initializers.GlorotUniform()
        super(SpatialAttention, self).__init__()

        with self.init_scope():
            self.c0 = L.Convolution2D(in_ch, out_ch, 1, 1, 0, initialW=w)
            self.c1 = L.Convolution2D(out_ch, out_ch, 1, 1, 0, initialW=w)
Code example #16
    def __init__(self, in_ch, out_ch, down=True, sn=False):
        super(CBR_Dis, self).__init__()
        w = initializers.GlorotUniform()
        self.down = down
        with self.init_scope():
            if sn:
                self.cdown = SNConvolution2D(in_ch,
                                             out_ch,
                                             4,
                                             2,
                                             1,
                                             initialW=w)
                self.cpara = SNConvolution2D(in_ch,
                                             out_ch,
                                             3,
                                             1,
                                             1,
                                             initialW=w)

            else:
                self.cdown = L.Convolution2D(in_ch,
                                             out_ch,
                                             4,
                                             2,
                                             1,
                                             initialW=w)
                self.cpara = L.Convolution2D(in_ch,
                                             out_ch,
                                             3,
                                             1,
                                             1,
                                             initialW=w)
Code example #17
    def __init__(self,
                 n_heads,
                 in_size,
                 out_size=None,
                 nobias=False,
                 initialW=None,
                 initial_bias=None):
        super(GraphAttentionConvolution, self).__init__()

        if out_size is None:
            in_size, out_size = None, in_size
        self.out_size = n_heads * out_size
        self.n_heads = n_heads

        with self.init_scope():
            if initialW is None:
                initialW = initializers.GlorotUniform()
            self.W = chainer.Parameter(initialW,
                                       (1, self.n_heads, in_size, out_size))
            if nobias:
                self.b = None
            else:
                if initial_bias is None:
                    initial_bias = 0
                bias_initializer = initializers._get_initializer(initial_bias)
                self.b = chainer.Parameter(bias_initializer, self.out_size)
            self.attention_W = chainer.Parameter(
                initialW, (1, self.n_heads, out_size, 2))
Code example #18
 def __init__(self, out_ch):
     super(SPADE, self).__init__()
     w = initializers.GlorotUniform()
     self.eps = 1e-5
     with self.init_scope():
         self.c0 = SNConvolution2D(3, 128, 3, 1, 1, initialW=w)
         self.cw = SNConvolution2D(128, out_ch, 3, 1, 1, initialW=w)
         self.cb = SNConvolution2D(128, out_ch, 3, 1, 1, initialW=w)
Code example #19
    def __init__(self, in_ch, out_ch, up=False, activation=F.relu):
        super(Gen_ResBlock, self).__init__()
        self.activation = activation
        self.up = up
        self.learnable_sc = in_ch != out_ch or up
        w = initializers.GlorotUniform(math.sqrt(2))
        w_sc = initializers.GlorotUniform()

        with self.init_scope():
            self.c0 = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)

            self.bn0 = L.BatchNormalization(in_ch)
            self.bn1 = L.BatchNormalization(out_ch)

            if self.learnable_sc:
                self.c_sc = L.Convolution2D(in_ch, out_ch, 1, 1, 0, initialW=w_sc)
Code example #20
 def __init__(self, in_ch, out_ch, adv_type='sat'):
     w = initializers.GlorotUniform()
     super(ResBlock, self).__init__()
     with self.init_scope():
         self.cbg0 = C1BG(in_ch, out_ch, adv_type)
         self.c0 = L.Convolution1D(in_ch, in_ch, 3, 1, 1, initialW=w)
         #self.bn0 = L.BatchNormalization(in_ch)
         self.bn0 = ConditionalInstanceNormalization(in_ch, adv_type)
Code example #21
File: embedding.py Project: GAIMJKP/models-2
    def __init__(self, size, vocab_size):
        super().__init__()
        self.size = size

        with self.init_scope():
            self.embed = L.EmbedID(vocab_size,
                                   size,
                                   initialW=initializers.GlorotUniform())
Code example #22
File: chainerc2d.py Project: roya0045/cvar2
    def __init__(self, in_channels: int, out_channels: int, filtr: (tuple, list),
                 sqrt=False, noB=0, KCD=False, verbose=False, stride=1, pad=0,
                 initW=initializers.GlorotUniform(scale=1.2, dtype=np.float32),
                 initB=initializers.GlorotUniform(scale=1.2, dtype=np.float32),
                 bias_dept=2, **kwargs):
        """
        input channels,
        number of outputs
        window
        
        """
        super(Convar2D, self).__init__()

        argument.check_unexpected_kwargs(
            kwargs, deterministic="deterministic argument is not "
            "supported anymore. "
            "Use chainer.using_config('cudnn_deterministic', value) "
            "context where value is either `True` or `False`.")
        dilate, = argument.parse_kwargs(kwargs, ('dilate', 1))

        #if filter is None:
        #    out_channels, ksize, in_channels = in_channels, out_channels, None

        self.filter = filtr
        self.sqrt = sqrt
        self.noB = noB
        self.V = verbose
        self.KCD = KCD
        self.stride = _pair(stride)
        self.pad = _pair(pad)
        self.dilate = _pair(dilate)
        self.out_channels = out_channels

        with self.init_scope():
            #W_initializer = initializers._get_initializer(initW)
            self.W = variable.Parameter(initW)
            if in_channels is not None:
                self._initialize_params(in_channels)

            if noB:
                self.b = None
            else:
                if initB is None:
                    initB = 0
                #bias_initializer = initializers._get_initializer(initB)
                self.b = variable.Parameter(initB, (self.out_channels,))
Code example #23
 def __init__(self, in_ch, out_ch, bn=True, activ=F.relu):
     super(CBR, self).__init__()
     w = initializers.GlorotUniform()
     self.bn = bn
     self.activ = activ
     with self.init_scope():
         self.c0 = L.Convolution2D(in_ch, out_ch, 4, 2, 1, initialW=w)
         if bn:
             self.bn0 = L.BatchNormalization(out_ch)
Code example #24
File: model.py Project: binbomb/SuperResolution
    def __init__(self, in_ch, out_ch):
        w = initializers.GlorotUniform()
        super(ResidualAttentionModule, self).__init__()

        with self.init_scope():
            self.c0 = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)
            self.ca = ChannelAttention(out_ch, out_ch)
            self.sa = SpatialAttention(out_ch, out_ch)
Code example #25
 def __init__(self, in_channels, out_channels, down=True):
     w = initializers.GlorotUniform()
     self.down = down
     super(CBR_Dis, self).__init__()
     with self.init_scope():
         self.cdown = EqualizedConv2d(in_channels, out_channels, 4, 2, 1)
         self.cpara = EqualizedConv2d(in_channels, out_channels, 3, 1, 1)
         self.csec = EqualizedConv2d(out_channels, out_channels, 3, 1, 1)
         self.c_sc = EqualizedConv2d(in_channels, out_channels, 1, 1, 0)
Code example #26
 def __init__(self, base=64, bn=True, activ=F.relu):
     super(GuideDecoder, self).__init__()
     w = initializers.GlorotUniform()
     with self.init_scope():
         self.up0 = Upsamp(base * 16, base * 8, bn=bn, activ=activ)
         self.up1 = Upsamp(base * 8, base * 4, bn=bn, activ=activ)
         self.up2 = Upsamp(base * 4, base * 2, bn=bn, activ=activ)
         self.up3 = Upsamp(base * 2, base, bn=bn, activ=activ)
         self.cout = L.Convolution2D(base, 3, 3, 1, 1, initialW=w)
Code example #27
 def __init__(self, in_ch, out_ch):
     w = initializers.GlorotUniform()
     super(Dense_Block, self).__init__()
     with self.init_scope():
         self.res0 = ResBlock_single(in_ch, out_ch)
         self.res1 = ResBlock_single(out_ch, out_ch)
         self.res2 = ResBlock_single(out_ch, out_ch)
         self.res3 = ResBlock_single(out_ch, out_ch)
         self.c0 = L.Convolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)
Code example #28
File: model.py Project: binbomb/SuperResolution
    def __init__(self, in_ch, out_ch, up=False, use_bn=False):
        self.use_bn = use_bn
        self.up = up
        w = initializers.GlorotUniform()
        super(CBR, self).__init__()

        with self.init_scope():
            self.c0 = L.Convolution2D(in_ch, out_ch * 4, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(out_ch)
Code example #29
 def __init__(self, in_ch, out_ch):
     super(SPADEResblk, self).__init__()
     w = initializers.GlorotUniform()
     with self.init_scope():
         self.spade0 = SPADE(in_ch)
         self.c0 = SNConvolution2D(in_ch, in_ch, 3, 1, 1, initialW=w)
         self.spade1 = SPADE(in_ch)
         self.c1 = SNConvolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
         self.spade_sc = SPADE(in_ch)
         self.c_sc = SNConvolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
Code example #30
    def __init__(self, in_ch, out_ch):
        super(Gen_SEResBlock, self).__init__()
        w = initializers.GlorotUniform(math.sqrt(2))
        with self.init_scope():
            self.c0 = L.Convolution2D(in_ch, out_ch, 3, 1, 1, initialW=w)
            self.c1 = L.Convolution2D(out_ch, out_ch, 3, 1, 1, initialW=w)
            self.se0 = SE(out_ch)

            self.bn0 = L.BatchNormalization(out_ch)
            self.bn1 = L.BatchNormalization(out_ch)