Пример #1
0
 def __init__(self, n_class, aspect_ratios):
     """Build an SSD-style detector on a VGG16 base.

     Args:
         n_class: Number of classes predicted by the multibox head.
         aspect_ratios: Per-feature-map aspect ratios forwarded to MultiBox.
     """
     # Shared weight/bias initializers applied to every convolution below.
     init = {
         'initialW': initializers.GlorotUniform(),
         'initial_bias': initializers.constant.Zero(),
     }
     super().__init__(
         # VGG16 feature extractor; pretrained weights are NOT loaded here.
         base=L.VGG16Layers(pretrained_model=None),
         conv5_1=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv5_2=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv5_3=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         # conv6: 3x3 kernel with dilation 6; pad=6 keeps the map size
         # (effective kernel extent is 13).
         conv6=L.DilatedConvolution2D(None,
                                      1024,
                                      3,
                                      pad=6,
                                      dilate=6,
                                      **init),
         conv7=L.Convolution2D(None, 1024, 1, **init),
         # Extra feature maps; the stride-2 convs halve the resolution.
         conv8_1=L.Convolution2D(None, 256, 1, **init),
         conv8_2=L.Convolution2D(None, 512, 3, stride=2, pad=1, **init),
         conv9_1=L.Convolution2D(None, 128, 1, **init),
         conv9_2=L.Convolution2D(None, 256, 3, stride=2, pad=1, **init),
         conv10_1=L.Convolution2D(None, 128, 1, **init),
         conv10_2=L.Convolution2D(None, 256, 3, **init),
         conv11_1=L.Convolution2D(None, 128, 1, **init),
         conv11_2=L.Convolution2D(None, 256, 3, **init),
         multibox=MultiBox(n_class, aspect_ratios=aspect_ratios, init=init),
     )
     self.n_class = n_class
     self.aspect_ratios = aspect_ratios
     # Inference mode by default; flipped externally for training.
     self.train = False
Пример #2
0
    def __init__(self, n_fg_class):
        """Latter half of an SSD network: VGG16 tail plus extra scales.

        Args:
            n_fg_class: Number of foreground classes; the background
                class is added internally (n_class = n_fg_class + 1).
        """
        super(LatterSsdNet, self).__init__()
        with self.init_scope():
            self.norm4 = Normalize(512)

            # Remaining VGG16 convolutions and the extra feature-map
            # convolutions (cf. chainercv/links/model/ssd/ssd_vgg16.py),
            # registered in this exact order.
            feature_links = (
                ('conv5_1', L.DilatedConvolution2D(512, 3, pad=1)),
                ('conv5_2', L.DilatedConvolution2D(512, 3, pad=1)),
                ('conv5_3', L.DilatedConvolution2D(512, 3, pad=1)),
                ('conv6', L.DilatedConvolution2D(1024, 3, pad=6, dilate=6)),
                ('conv7', L.Convolution2D(1024, 1)),
                ('conv8_1', L.Convolution2D(256, 1)),
                ('conv8_2', L.Convolution2D(512, 3, stride=2, pad=1)),
                ('conv9_1', L.Convolution2D(128, 1)),
                ('conv9_2', L.Convolution2D(256, 3, stride=2, pad=1)),
                ('conv10_1', L.Convolution2D(128, 1)),
                ('conv10_2', L.Convolution2D(256, 3)),
                ('conv11_1', L.Convolution2D(128, 1)),
                ('conv11_2', L.Convolution2D(256, 3)),
            )
            for link_name, link in feature_links:
                setattr(self, link_name, link)

            # Multibox head with per-feature-map aspect ratios.
            self.multibox = Multibox(n_class=n_fg_class + 1,
                                     aspect_ratios=((2, ), (2, 3), (2, 3),
                                                    (2, 3), (2, ), (2, )))
Пример #3
0
    def __init__(self, **links):
        """VGG16 convolutional trunk; extra links may be appended by name.

        Args:
            **links: Additional links registered after the fixed trunk.
        """
        trunk = dict(
            conv1_1=L.Convolution2D(None, 64, 3, pad=1),
            conv1_2=L.Convolution2D(None, 64, 3, pad=1),
            conv2_1=L.Convolution2D(None, 128, 3, pad=1),
            conv2_2=L.Convolution2D(None, 128, 3, pad=1),
            conv3_1=L.Convolution2D(None, 256, 3, pad=1),
            conv3_2=L.Convolution2D(None, 256, 3, pad=1),
            conv3_3=L.Convolution2D(None, 256, 3, pad=1),
            conv4_1=L.Convolution2D(None, 512, 3, pad=1),
            conv4_2=L.Convolution2D(None, 512, 3, pad=1),
            conv4_3=L.Convolution2D(None, 512, 3, pad=1),
            # Feature normalization with scale initialized to 20 (SSD-style).
            norm4=Normalize(512, initial=initializers.Constant(20)),
            conv5_1=L.DilatedConvolution2D(None, 512, 3, pad=1),
            conv5_2=L.DilatedConvolution2D(None, 512, 3, pad=1),
            conv5_3=L.DilatedConvolution2D(None, 512, 3, pad=1),
            conv6=L.DilatedConvolution2D(None, 1024, 3, pad=6, dilate=6),
            conv7=L.Convolution2D(None, 1024, 1),
        )
        super(VGG16, self).__init__(**trunk)
        # Register any caller-supplied links after the fixed trunk.
        for name, link in six.iteritems(links):
            self.add_link(name, link)
Пример #4
0
    def __init__(self, n_class=3, bn=True, wpad=True):
        """U-Net-like model with a dilated middle section.

        Args:
            n_class (int): Channels of the final ``score`` layer.
            bn (bool): If True, register a BatchNormalization per conv.
            wpad (bool): If True, 3x3 convs use pad=1 (size preserving);
                otherwise pad=0 and the output shrinks by 92 pixels total.
        """
        super(UNetDilate, self).__init__()

        pad = 1 if wpad else 0
        # NOTE(review): ``self.insize`` is assumed to be an attribute of
        # the enclosing class (not visible here) -- confirm.
        self.outsize = self.insize if wpad else (self.insize - 92)

        # Encoder: two conv pairs starting from 6 input channels.
        self.add_link('c1_1', L.Convolution2D(6,
                                              64,
                                              ksize=3,
                                              stride=1,
                                              pad=pad))
        self.add_link('c1_2',
                      L.Convolution2D(64, 64, ksize=3, stride=1, pad=pad))
        self.add_link('c2_1',
                      L.Convolution2D(64, 64, ksize=3, stride=1, pad=pad))
        self.add_link('c2_2',
                      L.Convolution2D(64, 64, ksize=3, stride=1, pad=pad))
        # Dilated middle: increasing dilation (2, 4, 6) widens the
        # receptive field; pad mirrors dilation so the size is preserved.
        self.add_link(
            'di1_1',
            L.DilatedConvolution2D(64, 64, ksize=3, stride=1, dilate=2, pad=2))
        self.add_link(
            'di2_1',
            L.DilatedConvolution2D(64, 64, ksize=3, stride=1, dilate=4, pad=4))
        self.add_link(
            'di3_1',
            L.DilatedConvolution2D(64, 64, ksize=3, stride=1, dilate=6, pad=6))

        #self.add_link('up4', L.Deconvolution2D(1024, 512, ksize=4, stride=2, pad=0))
        #self.add_link('up4', L.Deconvolution2D(1024, 512, ksize=2, stride=2, pad=0))
        # Decoder: 128-channel inputs suggest concatenated skip
        # connections (performed in the forward pass, not shown here).
        self.add_link('dc4_1',
                      L.Convolution2D(128, 64, ksize=3, stride=1, pad=pad))
        self.add_link('dc4_2',
                      L.Convolution2D(64, 64, ksize=3, stride=1, pad=pad))
        #self.add_link('up3', L.Deconvolution2D(512, 256, ksize=4, stride=2, pad=0))
        #self.add_link('up3', L.Deconvolution2D(512, 256, ksize=2, stride=2, pad=0))
        self.add_link('dc3_1',
                      L.Convolution2D(128, 64, ksize=3, stride=1, pad=pad))
        # 1x1 conv producing the per-class score map.
        self.add_link('score',
                      L.Convolution2D(64, n_class, ksize=1, stride=1, pad=0))

        if bn:
            # One BatchNormalization per convolution registered above.
            self.add_link('bnc1_1', L.BatchNormalization(64))
            self.add_link('bnc1_2', L.BatchNormalization(64))
            self.add_link('bnc2_1', L.BatchNormalization(64))
            self.add_link('bnc2_2', L.BatchNormalization(64))

            self.add_link('bndi1_1', L.BatchNormalization(64))
            self.add_link('bndi2_1', L.BatchNormalization(64))
            self.add_link('bndi3_1', L.BatchNormalization(64))
            #self.add_link('bnup4', L.BatchNormalization(512))
            self.add_link('bnd4_1', L.BatchNormalization(64))
            self.add_link('bnd4_2', L.BatchNormalization(64))
            #self.add_link('bnup3', L.BatchNormalization(256))
            self.add_link('bnd3_1', L.BatchNormalization(64))
            #self.add_link('bnd3_2', L.BatchNormalization(64))
        self.bn = bn
Пример #5
0
    def __init__(self,
                 dilations,
                 residual_channels=16,
                 dilation_channels=32,
                 skip_channels=128,
                 quantization_channels=256):
        '''Build the WaveNet embedding, output head and dilated stack.

        Args:
            dilations (list of int):
                A list with the dilation factor for each layer.
            residual_channels (int):
                How many filters to learn for the residual.
            dilation_channels (int):
                How many filters to learn for the dilated convolution.
            skip_channels (int):
                How many filters to learn that contribute to the quantized
                softmax output.
            quantization_channels (int):
                How many amplitude values to use for audio quantization and
                the corresponding one-hot encoding.
                Default: 256 (8-bit quantization).
        '''

        super(WaveNet, self).__init__(
            # A "one-hot" causal conv realized as an embedding lookup over
            # the quantized sample ids.
            causal_embedID=L.EmbedID(quantization_channels,
                                     2 * residual_channels),

            # Last 3 layers (include convolution on skip-connections).
            conv1x1_0=L.Convolution2D(None, skip_channels, 1),
            conv1x1_1=L.Convolution2D(None, skip_channels, 1),
            conv1x1_2=L.Convolution2D(None, quantization_channels, 1),
        )
        # Dilated stack: one filter/gate/residual trio per dilation entry.
        for i, dilation in enumerate(dilations):
            self.add_link(
                'conv_filter{}'.format(i),
                L.DilatedConvolution2D(None,
                                       dilation_channels, (1, 2),
                                       dilate=dilation))
            self.add_link(
                'conv_gate{}'.format(i),
                # NOTE(review): ``bias=1`` is the chainer v1 initial-bias
                # argument (newer chainer uses ``initial_bias``) -- confirm
                # the targeted chainer version.
                L.DilatedConvolution2D(None,
                                       dilation_channels, (1, 2),
                                       dilate=dilation,
                                       bias=1))
            self.add_link(
                'conv_res{}'.format(i),
                L.Convolution2D(None, residual_channels, 1, nobias=True))

        self.residual_channels = residual_channels
        self.dilations = dilations
Пример #6
0
    def __init__(self, n_classes):
        """SSD-style VGG16 trunk plus per-scale loc/conf heads.

        Args:
            n_classes: Number of foreground classes; the confidence head
                predicts ``n_classes + 1`` values (background included).
        """
        self.n_classes = n_classes

        # NOTE(review): ``self.conv_init``, ``self.norm_init`` and
        # ``self.aspect_ratios`` are assumed to be class attributes of the
        # enclosing class (not visible here) -- confirm.
        super().__init__(
            conv1_1=L.Convolution2D(None, 64, 3, pad=1, **self.conv_init),
            conv1_2=L.Convolution2D(None, 64, 3, pad=1, **self.conv_init),
            conv2_1=L.Convolution2D(None, 128, 3, pad=1, **self.conv_init),
            conv2_2=L.Convolution2D(None, 128, 3, pad=1, **self.conv_init),
            conv3_1=L.Convolution2D(None, 256, 3, pad=1, **self.conv_init),
            conv3_2=L.Convolution2D(None, 256, 3, pad=1, **self.conv_init),
            conv3_3=L.Convolution2D(None, 256, 3, pad=1, **self.conv_init),
            conv4_1=L.Convolution2D(None, 512, 3, pad=1, **self.conv_init),
            conv4_2=L.Convolution2D(None, 512, 3, pad=1, **self.conv_init),
            conv4_3=L.Convolution2D(None, 512, 3, pad=1, **self.conv_init),
            norm4=_Normalize(512, **self.norm_init),
            conv5_1=L.DilatedConvolution2D(None,
                                           512,
                                           3,
                                           pad=1,
                                           **self.conv_init),
            conv5_2=L.DilatedConvolution2D(None,
                                           512,
                                           3,
                                           pad=1,
                                           **self.conv_init),
            conv5_3=L.DilatedConvolution2D(None,
                                           512,
                                           3,
                                           pad=1,
                                           **self.conv_init),
            # conv6: dilation 6 with matching pad keeps the map size.
            conv6=L.DilatedConvolution2D(None,
                                         1024,
                                         3,
                                         pad=6,
                                         dilate=6,
                                         **self.conv_init),
            conv7=L.Convolution2D(None, 1024, 1, **self.conv_init),
            # Per-scale prediction heads, filled in the loop below.
            loc=chainer.ChainList(),
            conf=chainer.ChainList(),
        )
        for ar in self.aspect_ratios:
            # (len(ar) + 1) * 2 default boxes per spatial location.
            n = (len(ar) + 1) * 2
            # loc: presumably 4 box offsets per default box -- confirm.
            self.loc.add_link(
                L.Convolution2D(None, n * 4, 3, pad=1, **self.conv_init))
            self.conf.add_link(
                L.Convolution2D(None,
                                n * (self.n_classes + 1),
                                3,
                                pad=1,
                                **self.conv_init))
Пример #7
0
    def __init__(self):
        """Encoder / dilated-middle / decoder generator.

        NOTE(review): ``wscale`` is the chainer v1 weight-scale argument
        (removed in chainer v2), so this block targets chainer v1.
        """
        super(DilatedGEN, self).__init__(
            # Encoder: dc* layers downsample with stride 2, fc* keep size.
            dc1=L.Convolution2D(None, 48, 5, 2, 2, wscale=0.02*math.sqrt(1*5*5)),
            fc2=L.Convolution2D(48, 128, 3, 1, 1, wscale=0.02*math.sqrt(48*3*3)),
            fc3=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            dc4=L.Convolution2D(128, 256, 3, 2, 1, wscale=0.02*math.sqrt(128*3*3)),
            fc5=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc6=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            dc7=L.Convolution2D(256, 256, 3, 2, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc8=L.Convolution2D(256, 512, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            # Dilated middle: dilation grows 2 -> 4 -> 8, pad mirrors it.
            fc9=L.DilatedConvolution2D(512, 1024, 3, 1, 2, dilate=2, wscale=0.02*math.sqrt(512*3*3)),
            fc10=L.DilatedConvolution2D(1024, 1024, 3, 1, 4, dilate=4, wscale=0.02*math.sqrt(1024*3*3)),
            fc11=L.DilatedConvolution2D(1024, 1024, 3, 1, 8, dilate=8, wscale=0.02*math.sqrt(1024*3*3)),
            fc12=L.Convolution2D(1024, 1024, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc13=L.Convolution2D(1024, 512, 3, 1, 1, wscale=0.02*math.sqrt(1024*3*3)),
            fc14=L.Convolution2D(512, 256, 3, 1, 1, wscale=0.02*math.sqrt(512*3*3)),
            # Decoder: uc* deconvolutions upsample by 2.
            uc15=L.Deconvolution2D(256, 256, 4, 2, 1, wscale=0.02*math.sqrt(256*4*4)),
            fc16=L.Convolution2D(256, 256, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            fc17=L.Convolution2D(256, 128, 3, 1, 1, wscale=0.02*math.sqrt(256*3*3)),
            uc18=L.Deconvolution2D(128, 128, 4, 2, 1, wscale=0.02*math.sqrt(128*4*4)),
            fc19=L.Convolution2D(128, 128, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            fc20=L.Convolution2D(128, 48, 3, 1, 1, wscale=0.02*math.sqrt(128*3*3)),
            uc21=L.Deconvolution2D(48, 48, 4, 2, 1, wscale=0.02*math.sqrt(48*4*4)),
            fc22=L.Convolution2D(48, 24, 3, 1, 1, wscale=0.02*math.sqrt(48*3*3)),
            fc23=L.Convolution2D(24, 1, 3, 1, 1, wscale=0.02*math.sqrt(24*3*3)),

            # One batch norm per layer, matching its output channel count.
            bn1=L.BatchNormalization(48),
            bn2=L.BatchNormalization(128),
            bn3=L.BatchNormalization(128),
            bn4=L.BatchNormalization(256),
            bn5=L.BatchNormalization(256),
            bn6=L.BatchNormalization(256),
            bn7=L.BatchNormalization(256),
            bn8=L.BatchNormalization(512),
            bn9=L.BatchNormalization(1024),
            bn10=L.BatchNormalization(1024),
            bn11=L.BatchNormalization(1024),
            bn12=L.BatchNormalization(1024),
            bn13=L.BatchNormalization(512),
            bn14=L.BatchNormalization(256),
            bn15=L.BatchNormalization(256),
            bn16=L.BatchNormalization(256),
            bn17=L.BatchNormalization(128),
            bn18=L.BatchNormalization(128),
            bn19=L.BatchNormalization(128),
            bn20=L.BatchNormalization(48),
            bn21=L.BatchNormalization(48),
            bn22=L.BatchNormalization(24)
        )
Пример #8
0
    def __init__(self, input_width=256, input_height=256, ch=256, wscale=0.2):
        """Encoder / dilated-middle / decoder generator.

        Args:
            input_width: Width of the input image.
            input_height: Height of the input image.
            ch: Base channel count; layer widths are scaled from it.
            wscale: Standard deviation of the Normal weight initializer.
        """
        super(Generator, self).__init__()
        self.input_width = input_width
        self.input_height = input_height
        self.ch = ch

        with self.init_scope():
            init_w = chainer.initializers.Normal(wscale)
            # Encoder.  Output size: O = (I + 2P - F) / S + 1
            self.c0 = L.Convolution2D(4, ch // 4, 5, 1, 2, initialW=init_w)
            # Kernel size changed from the original model.
            self.c1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=init_w)
            self.c2 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=init_w)
            self.c3 = L.Convolution2D(ch // 2, ch, 4, 2, 1, initialW=init_w)
            self.c4 = L.Convolution2D(ch, ch, 3, 1, 1, initialW=init_w)
            self.c5 = L.Convolution2D(ch, ch, 3, 1, 1, initialW=init_w)
            # Dilated middle; O = {I + 2P - F - (F-1) * (D-1)} / S + 1
            self.c6 = L.DilatedConvolution2D(ch, ch, 3, 1, 2, 2, initialW=init_w)
            self.c7 = L.DilatedConvolution2D(ch, ch, 3, 1, 4, 4, initialW=init_w)
            self.c8 = L.DilatedConvolution2D(ch, ch, 3, 1, 8, 8, initialW=init_w)
            self.c9 = L.DilatedConvolution2D(ch, ch, 3, 1, 16, 16, initialW=init_w)
            self.c10 = L.Convolution2D(ch, ch, 3, 1, 1, initialW=init_w)
            self.c11 = L.Convolution2D(ch, ch, 3, 1, 1, initialW=init_w)
            # Decoder; deconvolution output: O = S * (I-1) + F - 2P
            self.c12 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=init_w)
            self.c13 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=init_w)
            self.c14 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=init_w)
            self.c15 = L.Convolution2D(ch // 4, ch // 8, 3, 1, 1, initialW=init_w)
            self.c16 = L.Convolution2D(ch // 8, 3, 3, 1, 1, initialW=init_w)
            # One batch norm per conv layer, matching its channel count;
            # registered in index order bn0 .. bn15.
            bn_channels = (ch // 4, ch // 2, ch // 2, ch, ch, ch, ch, ch,
                           ch, ch, ch, ch, ch // 2, ch // 2, ch // 4, ch // 8)
            for idx, n_ch in enumerate(bn_channels):
                setattr(self, 'bn{}'.format(idx), L.BatchNormalization(n_ch))
Пример #9
0
    def __init__(self, numLabel, architecture, act):
        """Stack of dilated convolutions described by ``architecture``.

        Args:
            numLabel: Output classes; a final 1x1 conv is added when > 0.
            architecture: Sequence of (channel, input len, filter len)
                tuples, one per layer.
            act: Activation function (stored; applied outside this method).
        """
        super(Net, self).__init__()

        self.numLabel = numLabel
        self.structure = architecture
        self.act = act

        # Per-layer placeholders (used outside this method).
        self.prev = [None] * len(architecture)

        for layer_idx, spec in enumerate(architecture):
            n_channel, input_len, filter_len = spec

            # First layer reads one input channel; later layers read the
            # previous layer's channel count.
            in_channel = 1 if layer_idx == 0 else architecture[layer_idx - 1][0]

            if filter_len == 1:
                assert input_len == 1
                dilation = 1
            else:
                # Dilation chosen so the kernel taps span the whole input.
                assert (input_len - 1) % (filter_len - 1) == 0
                dilation = (input_len - 1) // (filter_len - 1)

            layer = links.DilatedConvolution2D(
                in_channel, n_channel, (filter_len, 1), 1, 0, (dilation, 1))
            super(Net, self).add_link("c" + str(layer_idx), layer)

        if numLabel > 0:
            # 1x1 conv mapping the last layer's channels to label scores.
            head = links.Convolution2D(architecture[-1][0], numLabel, 1)
            super(Net, self).add_link("full", head)
Пример #10
0
 def __init__(self,
              in_ch,
              out_ch,
              ksize,
              stride=1,
              pad=1,
              dilation=1,
              nobias=False,
              upsample=False):
     """Single conv layer: deconvolution when ``upsample`` is set,
     dilated convolution when ``dilation > 1``, plain conv otherwise.
     """
     super(Conv, self).__init__()
     with self.init_scope():
         # Pick the layer type first, then register it once as self.conv.
         if upsample:
             layer = L.Deconvolution2D(
                 in_ch, out_ch, ksize, stride, pad, nobias=nobias)
         elif dilation > 1:
             layer = L.DilatedConvolution2D(
                 in_ch, out_ch, ksize, stride, pad, dilation, nobias=nobias)
         else:
             layer = L.Convolution2D(
                 in_ch, out_ch, ksize, stride, pad, nobias=nobias)
         self.conv = layer
Пример #11
0
    def __init__(self, in_channel, out_channel, ksize, dilate=1, causal=True):
        """1-D (optionally dilated) convolution wrapper for causal use.

        Args:
            in_channel: Input channels.
            out_channel: Output channels.
            ksize: Kernel length along the time axis.
            dilate: Dilation factor; ``None`` or 1 selects a plain conv.
            causal: Stored flag; its use is outside this method.
        """
        # NOTE(review): plain attributes are assigned before the Link
        # base-class __init__ (called last, below) -- confirm the chainer
        # version in use permits this.
        self.in_ch = in_channel
        self.out_ch = out_channel
        self.ksize = ksize
        self.dilate = dilate
        self.causal = causal
        # Receptive-field length of the dilated kernel.
        # NOTE(review): this multiplication fails if dilate is None even
        # though the branch below accepts None -- confirm callers always
        # pass a number.
        self.conv_size = (self.ksize - 1) * self.dilate + 1

        layers = {}
        if self.dilate is None or self.dilate == 1:
            # No dilation: a plain 1-D convolution suffices.
            layers["conv"] = L.ConvolutionND(1,
                                             self.in_ch,
                                             self.out_ch,
                                             self.ksize,
                                             stride=1,
                                             pad=0)
        else:
            # Dilated case: expressed as a 2-D conv over a (time, 1) map.
            layers["conv"] = L.DilatedConvolution2D( \
                self.in_ch,
                self.out_ch,
                (self.ksize, 1),
                stride=1,
                pad=(0, 0),
                dilate = (self.dilate, 1)
                )

        super(DilateConvCausal1D, self).__init__(**layers)
Пример #12
0
 def __init__(self, n_in_channel, n_skip_channel, n_dilate, useGPU):
     """WaveNet-style gated block: tanh/sigmoid dilated branches, a 1x1
     projection back to the input width, and per-branch batch norms.
     """
     tanh_branch = L.DilatedConvolution2D(None,
                                          n_skip_channel, (1, 2),
                                          dilate=n_dilate)
     sigmoid_branch = L.DilatedConvolution2D(None,
                                             n_skip_channel, (1, 2),
                                             dilate=n_dilate)
     projection = L.Convolution2D(n_skip_channel, n_in_channel, ksize=1)
     super(WaveBlock, self).__init__(
         dilatedConvTan=tanh_branch,
         dilatedConvSig=sigmoid_branch,
         conv=projection,
         batchNormSig=L.BatchNormalization(n_skip_channel),
         batchNormTan=L.BatchNormalization(n_skip_channel),
         batchNormCnn=L.BatchNormalization(n_skip_channel),
     )
     self.useGPU = useGPU
Пример #13
0
 def __init__(self, n_out):
     """Tiny conv net: conv -> dilated conv -> linear.

     Input sizes of every layer are inferred at the first forward pass.
     """
     layers = dict(
         l1=L.Convolution2D(None, 8, ksize=3),
         l2=L.DilatedConvolution2D(None, 16, ksize=3, dilate=2),
         l3=L.Linear(None, n_out),
     )
     super().__init__(**layers)
Пример #14
0
 def __init__(self, in_channels, out_channels, dilate):
     """Gated activation unit: parallel T/S dilated conv branches plus a
     1x1 combining convolution.

     Args:
         in_channels: Input channels of both branches.
         out_channels: Output channels of both branches.
         dilate: Dilation factor along the second spatial axis.
     """
     super(GAU, self).__init__()
     with self.init_scope():
         self.dilate = dilate
         # NOTE(review): ``dilate=(0, dilate)`` passes a zero dilation on
         # the first axis; chainer dilation factors are normally >= 1, so
         # (1, dilate) may have been intended -- confirm against the
         # chainer version in use.
         self.convT = L.DilatedConvolution2D(in_channels,
                                             out_channels,
                                             ksize=(1, 2),
                                             dilate=(0, dilate))
         self.convS = L.DilatedConvolution2D(in_channels,
                                             out_channels,
                                             ksize=(1, 2),
                                             dilate=(0, dilate))
         # 1x1 convolution applied after the branches are combined.
         self.conv = L.Convolution2D(out_channels, out_channels, ksize=1)
    def __init__(self, n_channel):
        """Feature extractor whose widths are multiples of ``n_channel``."""
        super(Extractor, self).__init__()
        # One shared pair of initializers for every convolution below.
        conv_kwargs = {
            'initialW': initializers.LeCunUniform(),
            'initial_bias': initializers.Zero(),
        }
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(n_channel, 3, pad=1, **conv_kwargs)
            self.conv1_2 = L.Convolution2D(n_channel, 3, pad=1, **conv_kwargs)

            self.conv2_1 = L.Convolution2D(n_channel * 2, 3, pad=1, **conv_kwargs)
            self.conv2_2 = L.Convolution2D(n_channel * 2, 3, pad=1, **conv_kwargs)

            # Dilated stage: wider receptive field without extra stride.
            self.conv3_1 = L.DilatedConvolution2D(
                n_channel * 4, 3, pad=1, dilate=2, **conv_kwargs)
            self.conv3_2 = L.DilatedConvolution2D(
                n_channel * 4, 3, pad=1, dilate=2, **conv_kwargs)

            self.conv4_1 = L.Convolution2D(n_channel * 8, 3, pad=1, **conv_kwargs)
            self.conv4_2 = L.Convolution2D(n_channel * 8, 3, pad=1, **conv_kwargs)
Пример #16
0
 def setUp(self):
     """Build the link from the test parameters and force parameter
     creation with one forward pass; then randomize the bias.
     """
     self.link = links.DilatedConvolution2D(*self.args, **self.kwargs)
     input_shape = (2, 3, 4, 3)
     self.x = numpy.random.uniform(-1, 1, input_shape).astype(numpy.float32)
     # Forward pass materializes lazily-created parameters (W, b).
     self.link(chainer.Variable(self.x))
     bias = self.link.b.data
     bias[...] = numpy.random.uniform(-1, 1, bias.shape)
     self.link.cleargrads()
     self.gy = numpy.random.uniform(-1, 1,
                                    (2, 2, 2, 2)).astype(numpy.float32)
Пример #17
0
 def __init__(self, nb_in, nb_out, ksize=3, dilate=1, no_bn=False):
     """Dilated convolution over the first spatial axis, optionally
     followed by batch normalization.
     """
     super(DConv_BN, self).__init__()
     self.no_bn = no_bn
     kernel = (ksize, 1)
     # Padding mirrors the dilation along the convolved axis.
     padding = (dilate, 0)
     with self.init_scope():
         self.conv = L.DilatedConvolution2D(nb_in,
                                            nb_out,
                                            ksize=kernel,
                                            pad=padding,
                                            dilate=(dilate, 1))
         if not no_bn:
             self.bn = L.BatchNormalization(nb_out)
Пример #18
0
    def __init__(self, dilation, n_channel1, n_channel2, conditional, d):
        """Gated residual block; a conditioning branch is added on demand.

        Args:
            dilation: Dilation along the time axis.
            n_channel1: Gated-unit width (conv outputs 2x for the gate).
            n_channel2: Block input width and projection base width.
            conditional: If True, register a conditioning convolution.
            d: Input width of the conditioning convolution.
        """
        super(ResidualBlock, self).__init__()
        self.dilation = dilation
        self.n_channel2 = n_channel2
        self.d = d
        self.conditional = conditional
        with self.init_scope():
            self.conv = L.DilatedConvolution2D(
                n_channel2, n_channel1 * 2, ksize=(2, 1),
                pad=(dilation, 0), dilate=(dilation, 1))
            if conditional:
                self.cond = L.DilatedConvolution2D(
                    d, n_channel1 * 2, ksize=(2, 1),
                    pad=(dilation, 0), dilate=(dilation, 1))
            self.proj = L.Convolution2D(n_channel1, n_channel2 * 2, 1)
Пример #19
0
    def setUp(self):
        """Create a fully-specified dilated link and random input/grad
        arrays; the bias is randomized so it participates in checks.
        """
        self.link = links.DilatedConvolution2D(
            3, 2, 3, stride=2, pad=2, dilate=2)
        bias = self.link.b.data
        bias[...] = numpy.random.uniform(-1, 1, bias.shape)
        self.link.cleargrads()

        shape_in, shape_out = (2, 3, 4, 3), (2, 2, 2, 2)
        self.x = numpy.random.uniform(-1, 1, shape_in).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, shape_out).astype(numpy.float32)
Пример #20
0
 def __init__(self, n_classes, aspect_ratios):
     """SSD-style network over a full VGG16 trunk.

     Args:
         n_classes: Number of classes handed to MultiBox.
         aspect_ratios: Per-feature-map aspect ratios for default boxes.
     """
     # Shared weight/bias initializers for every convolution below.
     init = {
         'initialW': initializers.GlorotUniform(),
         'initial_bias': initializers.Zero(),
     }
     super().__init__(
         conv1_1=L.Convolution2D(None, 64, 3, pad=1, **init),
         conv1_2=L.Convolution2D(None, 64, 3, pad=1, **init),
         conv2_1=L.Convolution2D(None, 128, 3, pad=1, **init),
         conv2_2=L.Convolution2D(None, 128, 3, pad=1, **init),
         conv3_1=L.Convolution2D(None, 256, 3, pad=1, **init),
         conv3_2=L.Convolution2D(None, 256, 3, pad=1, **init),
         conv3_3=L.Convolution2D(None, 256, 3, pad=1, **init),
         conv4_1=L.Convolution2D(None, 512, 3, pad=1, **init),
         conv4_2=L.Convolution2D(None, 512, 3, pad=1, **init),
         conv4_3=L.Convolution2D(None, 512, 3, pad=1, **init),
         # Feature normalization, scale initialized to 20 (SSD-style).
         norm4=Normalize(512, initial=initializers.Constant(20)),
         conv5_1=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv5_2=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         conv5_3=L.DilatedConvolution2D(None, 512, 3, pad=1, **init),
         # conv6: dilation 6 with pad 6 keeps the map size.
         conv6=L.DilatedConvolution2D(None,
                                      1024,
                                      3,
                                      pad=6,
                                      dilate=6,
                                      **init),
         conv7=L.Convolution2D(None, 1024, 1, **init),
         # Extra feature maps; stride-2 convs halve the resolution.
         conv8_1=L.Convolution2D(None, 256, 1, **init),
         conv8_2=L.Convolution2D(None, 512, 3, stride=2, pad=1, **init),
         conv9_1=L.Convolution2D(None, 128, 1, **init),
         conv9_2=L.Convolution2D(None, 256, 3, stride=2, pad=1, **init),
         conv10_1=L.Convolution2D(None, 128, 1, **init),
         conv10_2=L.Convolution2D(None, 256, 3, **init),
         conv11_1=L.Convolution2D(None, 128, 1, **init),
         conv11_2=L.Convolution2D(None, 256, 3, **init),
         multibox=MultiBox(n_classes,
                           aspect_ratios=aspect_ratios,
                           init=init),
     )
     self.n_classes = n_classes
     self.aspect_ratios = aspect_ratios
 def setUp(self):
     """Create a dilated link with deferred in_channels and initialize
     its parameters via one forward pass; then randomize the bias.
     """
     in_channels = None
     self.link = links.DilatedConvolution2D(
         in_channels, 2, 3, stride=2, pad=2, dilate=2)
     input_shape = (2, 3, 4, 3)
     self.x = numpy.random.uniform(-1, 1, input_shape).astype(numpy.float32)
     # Forward pass materializes the lazily-created W and b parameters.
     self.link(chainer.Variable(self.x))
     bias = self.link.b.data
     bias[...] = numpy.random.uniform(-1, 1, bias.shape)
     self.link.cleargrads()
     self.gy = numpy.random.uniform(-1, 1,
                                    (2, 2, 2, 2)).astype(numpy.float32)
Пример #22
0
 def __init__(self, number_features, dilation_factor, nobias):
     """Single dilated conv layer keeping the feature count unchanged."""
     self.dilation_factor = dilation_factor
     # Padding matches the dilation so the convolved axis keeps its length.
     layer = L.DilatedConvolution2D(
         in_channels=number_features,
         out_channels=number_features,
         ksize=(3, 1),
         dilate=dilation_factor,
         pad=(dilation_factor, 0),
         initialW=chainer.initializers.HeNormal(),
         nobias=nobias)
     super(convolutional_layer, self).__init__(layer)
Пример #23
0
    def __init__(self, d_factor, weight, bias):
        """16-channel dilated conv block with caller-supplied parameters.

        Args:
            d_factor: Dilation factor; pad matches it to preserve size.
            weight: Initial value for the convolution weights.
            bias: Initial value for the convolution bias.
        """
        diconv = L.DilatedConvolution2D(
            in_channels=16, out_channels=16, ksize=3, stride=1,
            pad=d_factor, dilate=d_factor, nobias=False,
            initialW=weight, initial_bias=bias)
        super(DilatedConvBlock, self).__init__(diconv=diconv)

        self.train = True
Пример #24
0
    def __init__(self, d_factor):
        """64-channel dilated conv block; pad equals the dilation factor
        so the spatial size is preserved.
        """
        diconv = L.DilatedConvolution2D(
            in_channels=64, out_channels=64, ksize=3, stride=1,
            pad=d_factor, dilate=d_factor, nobias=False)
        super(DilatedConvBlock, self).__init__(diconv=diconv)

        self.train = True
Пример #25
0
    def __init__(self):
        """VGG16 convolutional trunk through conv7, with dilated conv5/6.

        Links use the two-argument ``Convolution2D(out_channels, ksize)``
        form, so input channels are inferred at the first forward pass.
        """
        super(VGG16, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(64, 3, pad=1)
            self.conv1_2 = L.Convolution2D(64, 3, pad=1)

            self.conv2_1 = L.Convolution2D(128, 3, pad=1)
            self.conv2_2 = L.Convolution2D(128, 3, pad=1)

            self.conv3_1 = L.Convolution2D(256, 3, pad=1)
            self.conv3_2 = L.Convolution2D(256, 3, pad=1)
            self.conv3_3 = L.Convolution2D(256, 3, pad=1)

            self.conv4_1 = L.Convolution2D(512, 3, pad=1)
            self.conv4_2 = L.Convolution2D(512, 3, pad=1)
            self.conv4_3 = L.Convolution2D(512, 3, pad=1)

            self.conv5_1 = L.DilatedConvolution2D(512, 3, pad=1)
            self.conv5_2 = L.DilatedConvolution2D(512, 3, pad=1)
            self.conv5_3 = L.DilatedConvolution2D(512, 3, pad=1)

            # conv6: dilation 6 with pad 6 keeps the map size
            # (effective kernel extent 13).
            self.conv6 = L.DilatedConvolution2D(1024, 3, pad=6, dilate=6)
            self.conv7 = L.Convolution2D(1024, 1)
Пример #26
0
 def __init__(self, in_channels, out_channels, dilate):
     """Conv block with a (1, 3) kernel, dilated along the second axis
     when ``dilate`` is nonzero.
     """
     super(Swish, self).__init__()
     with self.init_scope():
         self.dilate = dilate
         if not dilate:
             # dilate is falsy (e.g. 0): plain conv; pad keeps the width.
             self.conv = L.Convolution2D(in_channels,
                                         out_channels,
                                         ksize=(1, 3),
                                         pad=(0, 1))
         else:
             # NOTE(review): ``dilate=(0, dilate)`` sets a zero dilation
             # on the first axis; chainer dilation factors are normally
             # >= 1, so (1, dilate) may have been intended -- confirm.
             self.conv = L.DilatedConvolution2D(in_channels,
                                                out_channels,
                                                ksize=(1, 3),
                                                dilate=(0, dilate),
                                                pad=(0, dilate))
Пример #27
0
    def __init__(self, in_size, ch):
        """Identity-mapping bottleneck with a dilated 3x3 middle conv.

        Args:
            in_size: Channel count entering (and leaving) the block.
            ch: Bottleneck channel count.
        """
        super(DilatedBottleNeckB, self).__init__()
        w = initializers.HeNormal()

        with self.init_scope():
            # 1x1 reduce -> dilated 3x3 (pad 2 keeps size) -> 1x1 expand,
            # each followed by batch normalization.
            self.conv1 = L.Convolution2D(
                in_size, ch, 1, 1, 0, initialW=w, nobias=True)
            self.bn1 = L.BatchNormalization(ch, eps=self.eps)
            self.conv2 = L.DilatedConvolution2D(
                ch, ch, 3, 1, 2, dilate=2, initialW=w, nobias=True)
            self.bn2 = L.BatchNormalization(ch, eps=self.eps)
            self.conv3 = L.Convolution2D(
                ch, in_size, 1, 1, 0, initialW=w, nobias=True)
            self.bn3 = L.BatchNormalization(in_size, eps=self.eps)
Пример #28
0
    def __init__(self):
        """VGG16 trunk for RefineDet, with dilated conv5/conv6.

        Links use the two-argument ``Convolution2D(out_channels, ksize)``
        form, so input channels are inferred at the first forward pass.
        """
        super(VGG16RefineDet, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(64, 3, pad=1)
            self.conv1_2 = L.Convolution2D(64, 3, pad=1)

            self.conv2_1 = L.Convolution2D(128, 3, pad=1)
            self.conv2_2 = L.Convolution2D(128, 3, pad=1)

            self.conv3_1 = L.Convolution2D(256, 3, pad=1)
            self.conv3_2 = L.Convolution2D(256, 3, pad=1)
            self.conv3_3 = L.Convolution2D(256, 3, pad=1)

            self.conv4_1 = L.Convolution2D(512, 3, pad=1)
            self.conv4_2 = L.Convolution2D(512, 3, pad=1)
            self.conv4_3 = L.Convolution2D(512, 3, pad=1)
            # Feature normalization, scale initialized to 20 (SSD-style).
            self.norm4 = Normalize(512, initial=initializers.Constant(20))

            self.conv5_1 = L.DilatedConvolution2D(512, 3, pad=1)
            self.conv5_2 = L.DilatedConvolution2D(512, 3, pad=1)
            self.conv5_3 = L.DilatedConvolution2D(512, 3, pad=1)

            # conv6 uses dilation 3 with pad 3 (smaller than SSD's 6),
            # which keeps the map size.
            self.conv6 = L.DilatedConvolution2D(1024, 3, pad=3, dilate=3)
            self.conv7 = L.Convolution2D(1024, 1)
Пример #29
0
    def __init__(self, filter_size, dilation,
                 residual_channels, dilated_channels, skip_channels):
        """WaveNet residual block: causal dilated conv plus 1x1 res/skip
        projections that each take half of the gated output.
        """
        super(ResidualBlock, self).__init__()
        self.filter_size = filter_size
        self.dilation = dilation
        self.residual_channels = residual_channels

        # Left-padding so the convolution is causal along the time axis.
        causal_pad = (dilation * (filter_size - 1), 0)
        with self.init_scope():
            self.conv = L.DilatedConvolution2D(
                residual_channels, dilated_channels,
                ksize=(filter_size, 1),
                pad=causal_pad, dilate=(dilation, 1))
            half = dilated_channels // 2
            self.res = L.Convolution2D(half, residual_channels, 1)
            self.skip = L.Convolution2D(half, skip_channels, 1)
Пример #30
0
 def __init__(self, in_ch, out_ch, ksize, stride=1, pad=1, dilation=1):
     """Convolution (dilated when ``dilation > 1``) followed by batch
     normalization; multi-node BN is used when a communicator is set.
     """
     super(ConvBNReLU, self).__init__()
     # NOTE(review): ``comm`` and ``initialW`` are read from chainer's
     # global config; they must have been registered on chainer.config
     # by the caller -- confirm.
     comm = chainer.config.comm
     w = chainer.config.initialW
     with self.init_scope():
         if dilation > 1:
             # Trailing positional args: nobias=True, initialW=w.
             self.conv = L.DilatedConvolution2D(
                 in_ch, out_ch, ksize, stride, pad, dilation, True, w)
         else:
             self.conv = L.Convolution2D(
                 in_ch, out_ch, ksize, stride, pad, True, w)
         if comm is not None:
             # Synchronized BN across workers when a communicator exists.
             self.bn = MultiNodeBatchNormalization(
                 out_ch, comm, eps=1e-5, decay=0.95)
         else:
             self.bn = L.BatchNormalization(out_ch, eps=1e-5, decay=0.95)