Example #1
    def _setup_convolution(self, layer):
        blobs = layer.blobs
        param = layer.convolution_param
        ksize = _get_ksize(param)
        stride = _get_stride(param)
        pad = _get_pad(param)
        num = _get_num(blobs[0])
        channels = _get_channels(blobs[0])
        bias_term = param.bias_term

        n_in = channels * param.group
        n_out = num

        func = convolution_2d.Convolution2D(
            n_in,
            n_out,
            ksize,
            stride,
            pad,
            nobias=not bias_term,
            initialW=_ConvolutionBlob(blobs[0], param.group),
            initial_bias=_Blob(blobs[1]) if bias_term else None)

        with self.init_scope():
            setattr(self, layer.name, func)
        self.forwards[layer.name] = _CallChildLink(self, layer.name)
        self._add_layer(layer)
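
As a point of reference for the call above, here is a minimal standalone sketch of the same constructor under plain Chainer (the channel counts and input size are made up for illustration): the positional arguments are (in_channels, out_channels, ksize, stride, pad), and the weight the link allocates has shape (out_channels, in_channels, kh, kw).

import numpy as np
import chainer.links as L

# Hypothetical sizes: 3 input channels, 16 output maps, 3x3 kernel, stride 1, pad 1.
conv = L.Convolution2D(3, 16, 3, 1, 1, nobias=False)
x = np.zeros((1, 3, 32, 32), dtype=np.float32)   # (batch, channels, height, width)
y = conv(x)
assert conv.W.shape == (16, 3, 3, 3)   # (out_channels, in_channels, kh, kw)
assert y.shape == (1, 16, 32, 32)      # a 3x3 kernel with pad=1, stride=1 keeps the spatial size
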
Example #2
    def _setup_convolution(self, layer):
        blobs = layer.blobs
        param = layer.convolution_param
        ksize = _get_ksize(param)
        stride = _get_stride(param)
        pad = _get_pad(param)
        num = _get_num(blobs[0])
        channels = _get_channels(blobs[0])

        n_in = channels * param.group
        n_out = num
        func = convolution_2d.Convolution2D(n_in, n_out, ksize, stride, pad,
                                            nobias=not param.bias_term)
        func.W.data[...] = 0

        part_size = len(blobs[0].data) // param.group
        for i in six.moves.range(param.group):
            in_slice = slice(i * n_in // param.group,
                             (i + 1) * n_in // param.group)
            out_slice = slice(i * n_out // param.group,
                              (i + 1) * n_out // param.group)
            w = func.W.data[out_slice, in_slice]

            data = numpy.array(
                blobs[0].data[i * part_size:(i + 1) * part_size])
            w[:] = data.reshape(w.shape)

        if param.bias_term:
            func.b.data[:] = blobs[1].data

        self.add_link(layer.name, func)
        self.forwards[layer.name] = _CallChildLink(self, layer.name)
        self._add_layer(layer)
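
The block-diagonal copy above is easier to see with a small numpy-only sketch (the sizes are made up): Caffe stores one weight block per group, and each block fills a disjoint (out_slice, in_slice) region of the full W, leaving the off-diagonal regions at zero.

import numpy as np

group, n_in, n_out, kh, kw = 2, 4, 6, 3, 3   # hypothetical sizes
W = np.zeros((n_out, n_in, kh, kw), dtype=np.float32)
blob = np.random.rand(group, n_out // group, n_in // group, kh, kw)

for i in range(group):
    out_slice = slice(i * n_out // group, (i + 1) * n_out // group)
    in_slice = slice(i * n_in // group, (i + 1) * n_in // group)
    W[out_slice, in_slice] = blob[i]         # each group fills its own diagonal block

assert W[0:3, 2:4].sum() == 0                # blocks outside the diagonal stay zero
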
Example #3
        def inception_0(input_channel, pool_channel):
            # 1x1
            s1 = AuxConv(
                C.Convolution2D(input_channel, 64, 1, use_cudnn=use_cudnn))

            # 5x5
            s21 = AuxConv(
                C.Convolution2D(input_channel, 48, 1, use_cudnn=use_cudnn))
            s22 = AuxConv(
                C.Convolution2D(48, 64, 5, pad=2, use_cudnn=use_cudnn))
            s2 = Sequential(s21, s22)

            # double 3x3
            s31 = AuxConv(
                C.Convolution2D(input_channel, 64, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(
                C.Convolution2D(64, 96, 3, pad=1, use_cudnn=use_cudnn))
            s33 = AuxConv(
                C.Convolution2D(96, 96, 3, pad=1, use_cudnn=use_cudnn))
            s3 = Sequential(s31, s32, s33)

            # pool
            s4 = AuxConv(C.Convolution2D(input_channel,
                                         pool_channel,
                                         3,
                                         pad=1,
                                         use_cudnn=use_cudnn),
                         pool=M.MaxPooling2D(3, 1, 1, use_cudnn=use_cudnn))

            return Inception(s1, s2, s3, s4)
Example #4
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize=None,
                 stride=1,
                 pad=0,
                 activation=relu.relu,
                 *args,
                 **kwargs):

        # If `args` is not empty, users assume the API for v1 and
        # specify `wscale` as a positional argument, which we want
        # to detect and forbid with an explicit error message.
        msg = ('wscale is not supported anymore. '
               'Use conv_init and bias_init argument to change '
               'the scale of initial parameters.')
        if args:
            raise TypeError(msg)
        argument.check_unexpected_kwargs(kwargs, wscale=msg)
        conv_init, bias_init = argument.parse_kwargs(kwargs,
                                                     ('conv_init', None),
                                                     ('bias_init', None))

        if ksize is None:
            out_channels, ksize, in_channels = in_channels, out_channels, None

        assert len(out_channels) > 0
        convs = [
            convolution_2d.Convolution2D(in_channels,
                                         out_channels[0],
                                         ksize,
                                         stride,
                                         pad,
                                         initialW=conv_init,
                                         initial_bias=bias_init)
        ]
        for n_in, n_out in zip(out_channels, out_channels[1:]):
            convs.append(
                convolution_2d.Convolution2D(n_in,
                                             n_out,
                                             1,
                                             initialW=conv_init,
                                             initial_bias=bias_init))
        super(MLPConvolution2D, self).__init__(*convs)
        self.activation = activation
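
A hedged usage sketch of this link (the numbers are illustrative, not taken from the source): `out_channels` is a sequence, the first convolution uses the given ksize/stride/pad, and every following one is a 1x1 convolution, so the stack acts like a convolution followed by a small per-pixel MLP.

import numpy as np
import chainer.links as L

# 3 input channels -> 96-channel 5x5 convolution -> two 96-channel 1x1 convolutions
mlpconv = L.MLPConvolution2D(3, (96, 96, 96), 5, pad=2)
x = np.zeros((1, 3, 32, 32), dtype=np.float32)
y = mlpconv(x)
assert y.shape == (1, 96, 32, 32)
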
Example #5
        def inception_2(input_channel):
            # 1x1
            s1 = AuxConv(
                C.Convolution2D(input_channel, 320, 1, use_cudnn=use_cudnn))

            # 3x3
            s21 = AuxConv(
                C.Convolution2D(input_channel, 384, 1, use_cudnn=use_cudnn))
            s22 = Inception(
                AuxConv(
                    C.Convolution2D(384,
                                    384, (1, 3),
                                    pad=(0, 1),
                                    use_cudnn=use_cudnn)),
                AuxConv(
                    C.Convolution2D(384,
                                    384, (3, 1),
                                    pad=(1, 0),
                                    use_cudnn=use_cudnn)))
            s2 = Sequential(s21, s22)

            # double 3x3
            s31 = AuxConv(
                C.Convolution2D(input_channel, 448, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(
                C.Convolution2D(448, 384, 3, pad=1, use_cudnn=use_cudnn))
            s331 = AuxConv(
                C.Convolution2D(384,
                                384, (1, 3),
                                pad=(0, 1),
                                use_cudnn=use_cudnn))
            s332 = AuxConv(
                C.Convolution2D(384,
                                384, (3, 1),
                                pad=(1, 0),
                                use_cudnn=use_cudnn))
            s33 = Inception(s331, s332)
            s3 = Sequential(s31, s32, s33)

            # pool
            s4 = AuxConv(C.Convolution2D(input_channel,
                                         192,
                                         3,
                                         pad=1,
                                         use_cudnn=use_cudnn),
                         pool=A.AveragePooling2D(3, 1, 1, use_cudnn=use_cudnn))
            return Inception(s1, s2, s3, s4)
Example #6
 def __init__(self, in_size, ch, out_size, stride=2, use_cudnn=True):
     w = math.sqrt(2)
     super(BottleNeckA, self).__init__(
         conv1=convolution_2d.Convolution2D(in_size,
                                            ch,
                                            1,
                                            stride,
                                            0,
                                            w,
                                            nobias=True,
                                            use_cudnn=use_cudnn),
         bn1=batch_normalization.BatchNormalization(ch),
         conv2=convolution_2d.Convolution2D(ch,
                                            ch,
                                            3,
                                            1,
                                            1,
                                            w,
                                            nobias=True,
                                            use_cudnn=use_cudnn),
         bn2=batch_normalization.BatchNormalization(ch),
         conv3=convolution_2d.Convolution2D(ch,
                                            out_size,
                                            1,
                                            1,
                                            0,
                                            w,
                                            nobias=True,
                                            use_cudnn=use_cudnn),
         bn3=batch_normalization.BatchNormalization(out_size),
         conv4=convolution_2d.Convolution2D(in_size,
                                            out_size,
                                            1,
                                            stride,
                                            0,
                                            w,
                                            nobias=True,
                                            use_cudnn=use_cudnn),
         bn4=batch_normalization.BatchNormalization(out_size),
     )
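
The block's __call__ is not shown in this excerpt; a plausible forward pass, assuming the usual ResNet bottleneck that these four conv/bn pairs suggest (with conv4/bn4 acting as the projection shortcut), is sketched below.

import chainer.functions as F

def bottleneck_forward(link, x):
    # `link` is assumed to be a BottleNeckA instance as defined above.
    h = F.relu(link.bn1(link.conv1(x)))   # 1x1 conv (with stride), then BN + ReLU
    h = F.relu(link.bn2(link.conv2(h)))   # 3x3 conv at the reduced width
    h = link.bn3(link.conv3(h))           # 1x1 conv back up to out_size channels
    shortcut = link.bn4(link.conv4(x))    # projection shortcut on the raw input
    return F.relu(h + shortcut)           # residual addition, then ReLU
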
Example #7
File: Models.py Project: ebsrn/CORE
 def __init__(self,
              ich,
              och,
              ksize,
              stride,
              pad,
              init_weights,
              pool=None,
              nobias=False):
     super(Conv, self).__init__(
         conv=C.Convolution2D(ich, och, ksize, stride, pad, nobias=nobias))
     self.pool = pool
Example #8
 def __init__(self, use_cudnn=True):
     super(VGG_A, self).__init__(
         conv1=convolution_2d.Convolution2D(3, 64, 3, pad=1, use_cudnn=use_cudnn),
         conv2=convolution_2d.Convolution2D(64, 128, 3, pad=1, use_cudnn=use_cudnn),
         conv3_1=convolution_2d.Convolution2D(128, 256, 3, pad=1, use_cudnn=use_cudnn),
         conv3_2=convolution_2d.Convolution2D(256, 256, 3, pad=1, use_cudnn=use_cudnn),
         conv4_1=convolution_2d.Convolution2D(256, 512, 3, pad=1, use_cudnn=use_cudnn),
         conv4_2=convolution_2d.Convolution2D(512, 512, 3, pad=1, use_cudnn=use_cudnn),
         conv5_1=convolution_2d.Convolution2D(512, 512, 3, pad=1, use_cudnn=use_cudnn),
         conv5_2=convolution_2d.Convolution2D(512, 512, 3, pad=1, use_cudnn=use_cudnn),
         fc6=linear.Linear(512 * 7 * 7, 4096),
         fc7=linear.Linear(4096, 4096),
         fc8=linear.Linear(4096, 1000)
     )
     self.use_cudnn = use_cudnn
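
The pooling layers live outside this constructor, but the fc6 size only adds up if 224x224 inputs are halved five times before the classifier, which is the usual VGG setup; a quick check of that assumption:

side = 224
for _ in range(5):        # five 2x2 max-poolings in the standard VGG-A layout (not shown here)
    side //= 2
assert side == 7
assert 512 * side * side == 25088   # matches linear.Linear(512 * 7 * 7, 4096)
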
Example #9
File: Models.py Project: ebsrn/CORE
 def __init__(self, ich, och, ksize, stride, pad, init_weights, pool=None):
     super(ConvBN, self).__init__(
         conv=C.Convolution2D(ich, och, ksize, stride, pad, nobias=True),
         bn=B.BatchNormalization(och),
     )
     self.pool = pool
     if init_weights:
         f = h5py.File('%s/data/dump/%s.h5' % (os.getcwd(), init_weights),
                       'r')
         self.conv.W.data = np.array(f['weights']).transpose([3, 2, 0, 1])
         self.bn.beta.data = np.array(f['beta'])
         self.bn.gamma.data = np.array(f['gamma'])
         self.bn.avg_mean = np.array(f['mean'])
         self.bn.avg_var = np.array(f['var'])
Example #10
 def __init__(self, in_channels, out1, proj3, out3, proj5, out5, proj_pool):
     super(Inception, self).__init__(
         conv1=convolution_2d.Convolution2D(in_channels, out1, 1),
         proj3=convolution_2d.Convolution2D(in_channels, proj3, 1),
         conv3=convolution_2d.Convolution2D(proj3, out3, 3, pad=1),
         proj5=convolution_2d.Convolution2D(in_channels, proj5, 1),
         conv5=convolution_2d.Convolution2D(proj5, out5, 5, pad=2),
         projp=convolution_2d.Convolution2D(in_channels, proj_pool, 1),
     )
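
How this module is applied is not shown in the excerpt; conventionally the four branches run in parallel and their outputs are concatenated along the channel axis. A hedged sketch of that forward pass, written against the links defined above:

import chainer.functions as F

def inception_forward(link, x):
    # `link` is assumed to be the Inception chain defined above.
    out1 = link.conv1(x)                                         # 1x1 branch
    out3 = link.conv3(F.relu(link.proj3(x)))                     # 1x1 -> 3x3 branch
    out5 = link.conv5(F.relu(link.proj5(x)))                     # 1x1 -> 5x5 branch
    pool = link.projp(F.max_pooling_2d(x, 3, stride=1, pad=1))   # 3x3 pool -> 1x1 branch
    return F.relu(F.concat((out1, out3, out5, pool), axis=1))    # channel-wise concatenation
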
Example #11
 def __init__(self,
              in_channels,
              out_channels,
              ksize,
              stride=1,
              pad=0,
              wscale=1,
              activation=relu.relu,
              use_cudnn=True):
     assert len(out_channels) > 0
     convs = [
         convolution_2d.Convolution2D(in_channels,
                                      out_channels[0],
                                      ksize,
                                      stride,
                                      pad,
                                      wscale=wscale,
                                      use_cudnn=use_cudnn)
     ]
     for n_in, n_out in zip(out_channels, out_channels[1:]):
         convs.append(
             convolution_2d.Convolution2D(n_in, n_out, 1, wscale=wscale))
     super(MLPConvolution2D, self).__init__(*convs)
     self.activation = activation
Example #12
    def __init__(self, in_channels, out1, proj3, out3, proj33, out33,
                 pooltype, proj_pool=None, stride=1, conv_init=None,
                 dtype=numpy.float32):
        super(InceptionBN, self).__init__(
            proj3=convolution_2d.Convolution2D(
                in_channels, proj3, 1, nobias=True, initialW=conv_init),
            conv3=convolution_2d.Convolution2D(
                proj3, out3, 3, pad=1, stride=stride, nobias=True,
                initialW=conv_init),
            proj33=convolution_2d.Convolution2D(
                in_channels, proj33, 1, nobias=True, initialW=conv_init),
            conv33a=convolution_2d.Convolution2D(
                proj33, out33, 3, pad=1, nobias=True, initialW=conv_init),
            conv33b=convolution_2d.Convolution2D(
                out33, out33, 3, pad=1, stride=stride, nobias=True,
                initialW=conv_init),
            proj3n=batch_normalization.BatchNormalization(proj3, dtype=dtype),
            conv3n=batch_normalization.BatchNormalization(out3, dtype=dtype),
            proj33n=batch_normalization.BatchNormalization(proj33,
                                                           dtype=dtype),
            conv33an=batch_normalization.BatchNormalization(out33,
                                                            dtype=dtype),
            conv33bn=batch_normalization.BatchNormalization(out33,
                                                            dtype=dtype),
        )

        if out1 > 0:
            assert stride == 1
            assert proj_pool is not None
            self.add_link('conv1',
                          convolution_2d.Convolution2D(in_channels, out1, 1,
                                                       stride=stride,
                                                       nobias=True,
                                                       initialW=conv_init))
            self.add_link('conv1n', batch_normalization.BatchNormalization(
                out1, dtype=dtype))
        self.out1 = out1

        if proj_pool is not None:
            self.add_link('poolp', convolution_2d.Convolution2D(
                in_channels, proj_pool, 1, nobias=True, initialW=conv_init))
            self.add_link('poolpn', batch_normalization.BatchNormalization(
                proj_pool, dtype=dtype))
        self.proj_pool = proj_pool

        self.stride = stride
        self.pooltype = pooltype
        if pooltype != 'max' and pooltype != 'avg':
            raise NotImplementedError()

        self.train = True
Example #13
    def __init__(self, in_channels, out1, proj3, out3, proj33, out33,
                 pooltype, proj_pool=None, stride=1, conv_init=None,
                 dtype=None):
        super(InceptionBN, self).__init__()
        self.out1 = out1
        self.proj_pool = proj_pool
        self.stride = stride
        self.pooltype = pooltype
        if pooltype != 'max' and pooltype != 'avg':
            raise NotImplementedError()
        dtype = chainer.get_dtype(dtype)

        with self.init_scope():
            self.proj3 = convolution_2d.Convolution2D(
                in_channels, proj3, 1, nobias=True, initialW=conv_init)
            self.conv3 = convolution_2d.Convolution2D(
                proj3, out3, 3, pad=1, stride=stride, nobias=True,
                initialW=conv_init)
            self.proj33 = convolution_2d.Convolution2D(
                in_channels, proj33, 1, nobias=True, initialW=conv_init)
            self.conv33a = convolution_2d.Convolution2D(
                proj33, out33, 3, pad=1, nobias=True, initialW=conv_init)
            self.conv33b = convolution_2d.Convolution2D(
                out33, out33, 3, pad=1, stride=stride, nobias=True,
                initialW=conv_init)
            self.proj3n = batch_normalization.BatchNormalization(
                proj3, dtype=dtype)
            self.conv3n = batch_normalization.BatchNormalization(
                out3, dtype=dtype)
            self.proj33n = batch_normalization.BatchNormalization(
                proj33, dtype=dtype)
            self.conv33an = batch_normalization.BatchNormalization(
                out33, dtype=dtype)
            self.conv33bn = batch_normalization.BatchNormalization(
                out33, dtype=dtype)

            if out1 > 0:
                assert stride == 1
                assert proj_pool is not None
                self.conv1 = convolution_2d.Convolution2D(
                    in_channels, out1, 1, stride=stride, nobias=True,
                    initialW=conv_init)
                self.conv1n = batch_normalization.BatchNormalization(
                    out1, dtype=dtype)

            if proj_pool is not None:
                self.poolp = convolution_2d.Convolution2D(
                    in_channels, proj_pool, 1, nobias=True, initialW=conv_init)
                self.poolpn = batch_normalization.BatchNormalization(
                    proj_pool, dtype=dtype)
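
For a sense of scale, a hedged instantiation along the lines of the GoogLeNet-BN networks this module is used in (the channel counts below are illustrative, not taken from the source):

import chainer.links as L

# 192 input channels; a 64-channel 1x1 branch, a 64->64 3x3 branch,
# a 64->96 double-3x3 branch, and an average-pool branch projected to 32 channels.
# The branch outputs are concatenated, giving 64 + 64 + 96 + 32 = 256 channels.
inc = L.InceptionBN(192, 64, 64, 64, 64, 96, 'avg', 32)
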
Example #14
 def __init__(self, use_cudnn=True):
     w = math.sqrt(2)
     super(ResNet50, self).__init__(
         conv1=convolution_2d.Convolution2D(3,
                                            64,
                                            7,
                                            2,
                                            3,
                                            w,
                                            nobias=True,
                                            use_cudnn=use_cudnn),
         bn1=batch_normalization.BatchNormalization(64),
         res2=Block(3, 64, 64, 256, 1, use_cudnn=use_cudnn),
         res3=Block(4, 256, 128, 512, use_cudnn=use_cudnn),
         res4=Block(6, 512, 256, 1024, use_cudnn=use_cudnn),
         res5=Block(3, 1024, 512, 2048, use_cudnn=use_cudnn),
         fc=linear.Linear(2048, 1000),
     )
     self.use_cudnn = use_cudnn
     self.train = True
Example #15
 def __init__(self,
              in_channels,
              out1,
              proj3,
              out3,
              proj5,
              out5,
              proj_pool,
              conv_init=None,
              bias_init=None):
     super(Inception, self).__init__()
     with self.init_scope():
         self.conv1 = convolution_2d.Convolution2D(in_channels,
                                                   out1,
                                                   1,
                                                   initialW=conv_init,
                                                   initial_bias=bias_init)
         self.proj3 = convolution_2d.Convolution2D(in_channels,
                                                   proj3,
                                                   1,
                                                   initialW=conv_init,
                                                   initial_bias=bias_init)
         self.conv3 = convolution_2d.Convolution2D(proj3,
                                                   out3,
                                                   3,
                                                   pad=1,
                                                   initialW=conv_init,
                                                   initial_bias=bias_init)
         self.proj5 = convolution_2d.Convolution2D(in_channels,
                                                   proj5,
                                                   1,
                                                   initialW=conv_init,
                                                   initial_bias=bias_init)
         self.conv5 = convolution_2d.Convolution2D(proj5,
                                                   out5,
                                                   5,
                                                   pad=2,
                                                   initialW=conv_init,
                                                   initial_bias=bias_init)
         self.projp = convolution_2d.Convolution2D(in_channels,
                                                   proj_pool,
                                                   1,
                                                   initialW=conv_init,
                                                   initial_bias=bias_init)
Example #16
    def __init__(self, use_cudnn=True):
        convolution = link.ChainList(
            AuxConv(C.Convolution2D(3, 32, 3, 2, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(32, 32, 3, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(32, 64, 3, 1, 1, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(64, 80, 3, 1, 1, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(80, 192, 3, use_cudnn=use_cudnn)))

        def inception_0(input_channel, pool_channel):
            # 1x1
            s1 = AuxConv(
                C.Convolution2D(input_channel, 64, 1, use_cudnn=use_cudnn))

            # 5x5
            s21 = AuxConv(
                C.Convolution2D(input_channel, 48, 1, use_cudnn=use_cudnn))
            s22 = AuxConv(
                C.Convolution2D(48, 64, 5, pad=2, use_cudnn=use_cudnn))
            s2 = Sequential(s21, s22)

            # double 3x3
            s31 = AuxConv(
                C.Convolution2D(input_channel, 64, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(
                C.Convolution2D(64, 96, 3, pad=1, use_cudnn=use_cudnn))
            s33 = AuxConv(
                C.Convolution2D(96, 96, 3, pad=1, use_cudnn=use_cudnn))
            s3 = Sequential(s31, s32, s33)

            # pool
            s4 = AuxConv(C.Convolution2D(input_channel,
                                         pool_channel,
                                         3,
                                         pad=1,
                                         use_cudnn=use_cudnn),
                         pool=M.MaxPooling2D(3, 1, 1, use_cudnn=use_cudnn))

            return Inception(s1, s2, s3, s4)

        inception0 = Sequential(*[
            inception_0(input_channel, pool_channel) for input_channel,
            pool_channel in zip([192, 256, 288], [32, 64, 64])
        ])

        grid_reduction0 = Inception(
            # strided 3x3
            AuxConv(C.Convolution2D(288, 384, 3, 2, use_cudnn=use_cudnn)),
            # double 3x3
            Sequential(
                AuxConv(C.Convolution2D(288, 64, 1, use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(64, 96, 3, pad=1,
                                        use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(96, 96, 3, 2, use_cudnn=use_cudnn))),
            # pool
            pool=M.MaxPooling2D(3, 2))

        def inception_1(hidden_channel):
            # 1x1
            s1 = AuxConv(C.Convolution2D(768, 192, 1, use_cudnn=use_cudnn))

            # 7x7
            s21 = AuxConv(
                C.Convolution2D(768, hidden_channel, 1, use_cudnn=use_cudnn))
            s22 = AuxConv(
                C.Convolution2D(hidden_channel,
                                hidden_channel, (1, 7),
                                pad=(0, 3),
                                use_cudnn=use_cudnn))
            s23 = AuxConv(
                C.Convolution2D(hidden_channel,
                                192, (7, 1),
                                pad=(3, 0),
                                use_cudnn=use_cudnn))
            s2 = Sequential(s21, s22, s23)

            # double 7x7
            s31 = AuxConv(
                C.Convolution2D(768, hidden_channel, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(
                C.Convolution2D(hidden_channel,
                                hidden_channel, (1, 7),
                                pad=(0, 3),
                                use_cudnn=use_cudnn))
            s33 = AuxConv(
                C.Convolution2D(hidden_channel,
                                hidden_channel, (7, 1),
                                pad=(3, 0),
                                use_cudnn=use_cudnn))
            s34 = AuxConv(
                C.Convolution2D(hidden_channel,
                                hidden_channel, (1, 7),
                                pad=(0, 3),
                                use_cudnn=use_cudnn))
            s35 = AuxConv(
                C.Convolution2D(hidden_channel,
                                192, (7, 1),
                                pad=(3, 0),
                                use_cudnn=use_cudnn))
            s3 = Sequential(s31, s32, s33, s34, s35)

            # pool
            s4 = AuxConv(C.Convolution2D(768,
                                         192,
                                         3,
                                         pad=1,
                                         use_cudnn=use_cudnn),
                         pool=A.AveragePooling2D(3, 1, 1, use_cudnn=use_cudnn))

            return Inception(s1, s2, s3, s4)

        inception1 = Sequential(
            *[inception_1(c) for c in [128, 160, 160, 192]])

        grid_reduction1 = Inception(
            # strided 3x3
            Sequential(
                AuxConv(C.Convolution2D(768, 192, 1, use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(192, 320, 3, 2, use_cudnn=use_cudnn))),
            # 7x7 and 3x3
            Sequential(
                AuxConv(C.Convolution2D(768, 192, 1, use_cudnn=use_cudnn)),
                AuxConv(
                    C.Convolution2D(192,
                                    192, (1, 7),
                                    pad=(0, 3),
                                    use_cudnn=use_cudnn)),
                AuxConv(
                    C.Convolution2D(192,
                                    192, (7, 1),
                                    pad=(3, 0),
                                    use_cudnn=use_cudnn)),
                AuxConv(C.Convolution2D(192, 192, 3, 2, use_cudnn=use_cudnn))),
            # pool
            pool=M.MaxPooling2D(3, 2, use_cudnn=use_cudnn))

        def inception_2(input_channel):
            # 1x1
            s1 = AuxConv(
                C.Convolution2D(input_channel, 320, 1, use_cudnn=use_cudnn))

            # 3x3
            s21 = AuxConv(
                C.Convolution2D(input_channel, 384, 1, use_cudnn=use_cudnn))
            s22 = Inception(
                AuxConv(
                    C.Convolution2D(384,
                                    384, (1, 3),
                                    pad=(0, 1),
                                    use_cudnn=use_cudnn)),
                AuxConv(
                    C.Convolution2D(384,
                                    384, (3, 1),
                                    pad=(1, 0),
                                    use_cudnn=use_cudnn)))
            s2 = Sequential(s21, s22)

            # double 3x3
            s31 = AuxConv(
                C.Convolution2D(input_channel, 448, 1, use_cudnn=use_cudnn))
            s32 = AuxConv(
                C.Convolution2D(448, 384, 3, pad=1, use_cudnn=use_cudnn))
            s331 = AuxConv(
                C.Convolution2D(384,
                                384, (1, 3),
                                pad=(0, 1),
                                use_cudnn=use_cudnn))
            s332 = AuxConv(
                C.Convolution2D(384,
                                384, (3, 1),
                                pad=(1, 0),
                                use_cudnn=use_cudnn))
            s33 = Inception(s331, s332)
            s3 = Sequential(s31, s32, s33)

            # pool
            s4 = AuxConv(C.Convolution2D(input_channel,
                                         192,
                                         3,
                                         pad=1,
                                         use_cudnn=use_cudnn),
                         pool=A.AveragePooling2D(3, 1, 1, use_cudnn=use_cudnn))
            return Inception(s1, s2, s3, s4)

        inception2 = Sequential(
            *[inception_2(input_channel) for input_channel in [1280, 2048]])

        auxiliary_convolution = Sequential(
            AuxConv(C.Convolution2D(768, 128, 1, use_cudnn=use_cudnn),
                    pool=A.AveragePooling2D(5, 3, use_cudnn=use_cudnn)),
            AuxConv(C.Convolution2D(128, 768, 5, use_cudnn=use_cudnn)))

        super(InceptionV3, self).__init__(
            convolution=convolution,
            inception=link.ChainList(inception0, inception1, inception2),
            grid_reduction=link.ChainList(grid_reduction0, grid_reduction1),
            auxiliary_convolution=auxiliary_convolution,
            auxiliary_linear=linear.Linear(768, 1000),
            linear=linear.Linear(2048, 1000))
Example #17
 def __init__(self, *args, **kwargs):
     conv = C.Convolution2D(*args, **kwargs)
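     # conv.W has shape (out_channels, in_channels, kh, kw) once initialized,
     # so the length of its first axis is the output-channel count.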
     out_channel = len(conv.W.data)
     batch_norm = B.BatchNormalization(out_channel)
     super(ConvBN, self).__init__(conv=conv, batch_norm=batch_norm)
Example #18
 def __init__(self, use_cudnn=True):
     super(Alex, self).__init__(
         conv1a=convolution_2d.Convolution2D(3,
                                             48,
                                             11,
                                             stride=4,
                                             pad=2,
                                             use_cudnn=use_cudnn),
         conv1b=convolution_2d.Convolution2D(3,
                                             48,
                                             11,
                                             stride=4,
                                             pad=2,
                                             use_cudnn=use_cudnn),
         conv2a=convolution_2d.Convolution2D(48,
                                             128,
                                             5,
                                             pad=2,
                                             use_cudnn=use_cudnn),
         conv2b=convolution_2d.Convolution2D(48,
                                             128,
                                             5,
                                             pad=2,
                                             use_cudnn=use_cudnn),
         conv3a=convolution_2d.Convolution2D(128,
                                             192,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv3b=convolution_2d.Convolution2D(128,
                                             192,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv3c=convolution_2d.Convolution2D(128,
                                             192,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv3d=convolution_2d.Convolution2D(128,
                                             192,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv4a=convolution_2d.Convolution2D(192,
                                             192,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv4b=convolution_2d.Convolution2D(192,
                                             192,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv5a=convolution_2d.Convolution2D(192,
                                             128,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         conv5b=convolution_2d.Convolution2D(192,
                                             128,
                                             3,
                                             pad=1,
                                             use_cudnn=use_cudnn),
         fc6a=linear.Linear(4608, 4096),
         fc6b=linear.Linear(4608, 4096),
         fc7=linear.Linear(4096, 4096),
         fc8=linear.Linear(4096, 1000))