Example #1
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'

        layer = nn.HybridSequential(prefix='final_')
        with layer.name_scope():
            for i in range(num_layers):
                kernel, padding, output_padding = \
                    self._get_deconv_cfg(num_kernels[i])

                planes = num_filters[i]
                layer.add(
                    nn.Conv2DTranspose(
                        channels=planes,
                        kernel_size=kernel,
                        strides=2,
                        padding=padding,
                        output_padding=output_padding,
                        use_bias=self.deconv_with_bias,
                        weight_initializer=initializer.Normal(0.001),
                        bias_initializer=initializer.Zero()))
                layer.add(nn.BatchNorm(gamma_initializer=initializer.One(),
                                       beta_initializer=initializer.Zero()))
                layer.add(nn.Activation('relu'))
                self.inplanes = planes

        return layer
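A minimal standalone sketch of what this head does, assuming the usual `_get_deconv_cfg(4)` result of kernel=4, padding=1, output_padding=0: each stride-2 `Conv2DTranspose` doubles the spatial size, so three layers turn a ResNet-50 feature map of 8x6 (for a 256x192 input) into 64x48.

import mxnet as mx
from mxnet import initializer
from mxnet.gluon import nn

# Sketch only: rebuilds the same deconv stack standalone, assuming
# _get_deconv_cfg(4) returns kernel=4, padding=1, output_padding=0.
deconv = nn.HybridSequential()
for planes in (256, 256, 256):
    deconv.add(nn.Conv2DTranspose(channels=planes, kernel_size=4, strides=2,
                                  padding=1, output_padding=0, use_bias=False,
                                  weight_initializer=initializer.Normal(0.001)))
    deconv.add(nn.BatchNorm(gamma_initializer=initializer.One(),
                            beta_initializer=initializer.Zero()))
    deconv.add(nn.Activation('relu'))
deconv.initialize()

x = mx.nd.zeros((1, 2048, 8, 6))   # ResNet-50 stage-4 output for a 256x192 input
print(deconv(x).shape)             # (1, 256, 64, 48)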
Example #2
    def __init__(self, planes, upscale_factor=2, **kwargs):
        super(DUC, self).__init__(**kwargs)
        # 3x3 conv -> BN -> ReLU, then PixelShuffle2D upsamples by `upscale_factor`
        self.conv = nn.Conv2D(planes, kernel_size=3, padding=1, use_bias=False)
        self.bn = gcv.nn.BatchNormCudnnOff(gamma_initializer=initializer.One(),
                                           beta_initializer=initializer.Zero())
        self.relu = nn.Activation('relu')
        self.pixel_shuffle = contrib.nn.PixelShuffle2D(upscale_factor)
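The snippet stops at the constructor; below is a hedged sketch of the forward pass, assuming the usual DUC (dense upsampling convolution) ordering of conv, batch norm, ReLU, then pixel shuffle.

    # Hypothetical forward method for the block above; the original may differ.
    def hybrid_forward(self, F, x):
        x = self.conv(x)               # 3x3 conv to `planes` channels
        x = self.bn(x)
        x = self.relu(x)
        return self.pixel_shuffle(x)   # trades channels for a 2x larger feature map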
Example #3
    def __init__(self,
                 fixed_size=True,
                 base_name='resnet50_v1b',
                 pretrained_base=False,
                 pretrained_ctx=cpu(),
                 num_joints=17,
                 num_deconv_layers=3,
                 num_deconv_filters=(256, 256, 256),
                 num_deconv_kernels=(4, 4, 4),
                 final_conv_kernel=1,
                 deconv_with_bias=False,
                 in_channels=3,
                 in_size=(256, 192),
                 **kwargs):
        super(SimplePoseResNet, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size

        from gluoncv.model_zoo import get_model
        base_network = get_model(base_name,
                                 pretrained=pretrained_base,
                                 ctx=pretrained_ctx,
                                 norm_layer=gcv.nn.BatchNormCudnnOff)

        self.resnet = nn.HybridSequential()
        if base_name.endswith('v1'):
            for layer in ['features']:
                self.resnet.add(getattr(base_network, layer))
        else:
            for layer in [
                    'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2',
                    'layer3', 'layer4'
            ]:
                self.resnet.add(getattr(base_network, layer))

        self.deconv_with_bias = deconv_with_bias

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            num_deconv_layers,
            num_deconv_filters,
            num_deconv_kernels,
        )

        self.final_layer = nn.Conv2D(
            channels=num_joints,
            kernel_size=final_conv_kernel,
            strides=1,
            padding=1 if final_conv_kernel == 3 else 0,
            weight_initializer=initializer.Normal(0.001),
            bias_initializer=initializer.Zero())
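A hedged usage sketch for the class above, assuming it also defines a `hybrid_forward` that chains `self.resnet`, `self.deconv_layers`, and `self.final_layer`: the 32x-downsampling backbone plus three 2x deconv stages yields 17 heatmaps at one quarter of the 256x192 input resolution.

import mxnet as mx

net = SimplePoseResNet(base_name='resnet50_v1b', pretrained_base=False)
net.initialize(ctx=mx.cpu())

x = mx.nd.random.uniform(shape=(1, 3, 256, 192))   # NCHW, matches in_size
heatmaps = net(x)
print(heatmaps.shape)   # expected: (1, 17, 64, 48)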
Example #4
def newModel() -> glu.nn.HybridSequential:
    model = glu.nn.HybridSequential()
    # with model.name_scope():
    model.add(
        HWY((glu.nn.Dense(100), )),
        glu.nn.Activation('relu'),
        # Skip((
        #     glu.nn.Dense(100),
        # )),
        # glu.nn.Activation('relu'),
        glu.nn.Dense(10))

    # init = initializer.Uniform()
    init = initializer.Zero()
    model.initialize(ctx=CTX, init=init)
    return model
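Note that `initializer.Zero()` zeroes every weight, so all units in a layer start identical and stay identical under gradient descent; the commented-out `initializer.Uniform()` is normally the better choice. A small sketch of the symptom, with the custom `HWY` block replaced by a plain `Dense` layer (an assumption, since `HWY` is not shown here):

import mxnet as mx
from mxnet import gluon, initializer

net = gluon.nn.HybridSequential()
net.add(gluon.nn.Dense(100),
        gluon.nn.Activation('relu'),
        gluon.nn.Dense(10))
net.initialize(init=initializer.Zero())

out = net(mx.nd.random.uniform(shape=(4, 20)))
print(out)   # all zeros: every unit computes the same function

# the commented-out alternative from the example breaks this symmetry:
# net.initialize(init=initializer.Uniform(), force_reinit=True)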
Example #5
    def __init__(self, ctx=mx.cpu(), pretrained=True,
                 norm_layer=nn.BatchNorm,  # assumed default; `norm_layer` is referenced below
                 **kwargs):
        super(FastPose_SE, self).__init__()

        self.preact = SEResnet('resnet101', norm_layer=norm_layer, **kwargs)
        self.reload_base()

        self.shuffle1 = PixelShuffle(2)
        self.duc1 = DUC(1024,
                        upscale_factor=2,
                        norm_layer=norm_layer,
                        **kwargs)
        self.duc2 = DUC(512, upscale_factor=2, norm_layer=norm_layer, **kwargs)

        self.conv_out = nn.Conv2D(channels=opt.nClasses,
                                  kernel_size=3,
                                  strides=1,
                                  padding=1,
                                  weight_initializer=initializer.Normal(0.001),
                                  bias_initializer=initializer.Zero())
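The forward pass is not part of this snippet; below is a hedged sketch of how FastPose-style heads typically wire these blocks (SE-ResNet backbone, pixel shuffle, two DUC stages, 3x3 output conv). The actual implementation may differ.

    # Hypothetical forward pass for the blocks declared above.
    def hybrid_forward(self, F, x):
        x = self.preact(x)        # SE-ResNet101 backbone features
        x = self.shuffle1(x)      # 2x upsample via pixel shuffle
        x = self.duc1(x)          # DUC stage, another 2x
        x = self.duc2(x)          # DUC stage, another 2x
        return self.conv_out(x)   # opt.nClasses heatmap channels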
Example #6
    def __init__(self, base_name='resnet50_v1b',
                 pretrained_base=False, pretrained_ctx=cpu(),
                 num_joints=19,
                 num_deconv_layers=3,
                 num_deconv_filters=(256, 256, 256),
                 num_deconv_kernels=(4, 4, 4),
                 final_conv_kernel=1, deconv_with_bias=False, **kwargs):
        super(MultiPoseResNet, self).__init__(**kwargs)

        base_network = get_model(base_name, pretrained=pretrained_base, ctx=pretrained_ctx)

        self.resnet = nn.HybridSequential()
        if base_name.endswith('v1'):
            for layer in ['features']:
                self.resnet.add(getattr(base_network, layer))
        else:
            for layer in ['conv1', 'bn1', 'relu', 'maxpool',
                          'layer1', 'layer2', 'layer3', 'layer4']:
                self.resnet.add(getattr(base_network, layer))

        self.deconv_with_bias = deconv_with_bias

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            num_deconv_layers,
            num_deconv_filters,
            num_deconv_kernels,
        )

        self.final_layer = nn.Conv2D(
            channels=num_joints*3,
            kernel_size=final_conv_kernel,
            strides=1,
            prefix='final_',
            padding=1 if final_conv_kernel == 3 else 0,
            weight_initializer=initializer.Normal(0.001),
            bias_initializer=initializer.Zero()
        )
Example #7
    def __init__(self, input_nc, output_nc, ngf, ctx=cpu(0)):
        super(defineG_unet, self).__init__()
        with self.name_scope():
            self.encoder = nn.HybridSequential()
            encoder_1 = nn.Conv2D(channels=ngf, kernel_size=4, strides=2, padding=1, in_channels=input_nc)
            encoder_2 = _encoder_module(ngf * 1, ngf * 2)
            encoder_3 = _encoder_module(ngf * 2, ngf * 4)
            encoder_4 = _encoder_module(ngf * 4, ngf * 8)
            encoder_5 = _encoder_module(ngf * 8, ngf * 8)
            encoder_6 = _encoder_module(ngf * 8, ngf * 8)
            encoder_7 = _encoder_module(ngf * 8, ngf * 8)
            encoder_8 = _encoder_module(ngf * 8, ngf * 8, norm_layer=None)
            self.encoder.add(*[encoder_1, encoder_2, encoder_3, encoder_4,
                               encoder_5, encoder_6, encoder_7, encoder_8])

            self.decoder = nn.HybridSequential()
            decoder_1 = _decoder_module(ngf * 8, ngf * 8, dp=0.5)
            decoder_2 = _decoder_module(ngf * 8 * 2, ngf * 8, dp=0.5)
            decoder_3 = _decoder_module(ngf * 8 * 2, ngf * 8, dp=0.5)
            decoder_4 = _decoder_module(ngf * 8 * 2, ngf * 8)
            decoder_5 = _decoder_module(ngf * 8 * 2, ngf * 4)
            decoder_6 = _decoder_module(ngf * 4 * 2, ngf * 2)
            decoder_7 = _decoder_module(ngf * 2 * 2, ngf * 1)
            decoder_8 = _decoder_module(ngf * 1 * 2, output_nc, norm_layer=None)
            self.decoder.add(*[decoder_1, decoder_2, decoder_3, decoder_4,
                               decoder_5, decoder_6, decoder_7, decoder_8])

        self.encoder.collect_params('.*weight').initialize(ctx=ctx, init=ShiftNormal(0, 0.02))
        self.encoder.collect_params('.*gamma|.*running_var').initialize(ctx=ctx, init=ShiftNormal(1.0, 0.02))
        self.encoder.collect_params('.*bias|.*running_mean|.*beta').initialize(ctx=ctx, init=initializer.Zero())
        self.decoder.collect_params('.*weight').initialize(ctx=ctx, init=ShiftNormal(0, 0.02))
        self.decoder.collect_params('.*gamma|.*running_var').initialize(ctx=ctx, init=ShiftNormal(1.0, 0.02))
        self.decoder.collect_params('.*bias|.*running_mean|.*beta').initialize(ctx=ctx, init=initializer.Zero())
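The doubled input channels on decoder stages 2-8 (`ngf * 8 * 2`, ..., `ngf * 1 * 2`) imply U-Net skip connections: each decoder block consumes its predecessor's output concatenated with the mirrored encoder output. A hedged sketch of the matching forward pass, with a final tanh as in the pix2pix generator this naming follows (an assumption, since the forward method is not shown):

    # Hypothetical forward pass; skip connections are implied by the channel counts.
    def hybrid_forward(self, F, x):
        feats = []
        for block in self.encoder:
            x = block(x)
            feats.append(x)                            # keep encoder outputs for skips
        x = self.decoder[0](feats[-1])
        for i, block in enumerate(list(self.decoder)[1:], start=2):
            x = block(F.concat(x, feats[-i], dim=1))   # concat with mirrored encoder stage
        return F.tanh(x)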