Example #1
    def __init__(self, base_model, num_classes, feature_stride, **kwargs):
        super(VGGFastRCNNHead, self).__init__(**kwargs)
        self.feature_stride = feature_stride
        self.fc_layers = nn.HybridSequential()
        # Include last 4 vgg feature layers (2 * (fc + dropout))
        for layer in base_model.features[-4:]:
            self.fc_layers.add(layer)
        self.cls_score = nn.Dense(in_units=4096,
                                  units=num_classes,
                                  weight_initializer=initializer.Normal(0.01))
        self.bbox_pred = nn.Dense(in_units=4096,
                                  units=num_classes * 4,
                                  weight_initializer=initializer.Normal(0.001))
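
A quick, self-contained way to sanity-check the two Normal scales used above (0.01 for the classifier, 0.001 for the box regressor); the layer and shapes here are illustrative, not from the example:

import mxnet as mx
from mxnet import initializer
from mxnet.gluon import nn

# Hypothetical throwaway layer, only to inspect what Normal(0.01) produces.
dense = nn.Dense(10, in_units=4096,
                 weight_initializer=initializer.Normal(0.01))
dense.initialize()
print(dense.weight.data().asnumpy().std())  # should be close to 0.01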
Example #2
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'

        layer = nn.HybridSequential(prefix='final_')
        with layer.name_scope():
            for i in range(num_layers):
                kernel, padding, output_padding = \
                    self._get_deconv_cfg(num_kernels[i])

                planes = num_filters[i]
                layer.add(
                    nn.Conv2DTranspose(
                        channels=planes,
                        kernel_size=kernel,
                        strides=2,
                        padding=padding,
                        output_padding=output_padding,
                        use_bias=self.deconv_with_bias,
                        weight_initializer=initializer.Normal(0.001),
                        bias_initializer=initializer.Zero()))
                layer.add(nn.BatchNorm(gamma_initializer=initializer.One(),
                                       beta_initializer=initializer.Zero()))
                layer.add(nn.Activation('relu'))
                self.inplanes = planes

        return layer
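
The `_get_deconv_cfg` helper called above is not part of this excerpt; a sketch of the mapping such Simple Baselines-style pose code usually implements (an assumption, not the quoted source):

    def _get_deconv_cfg(self, deconv_kernel):
        # Assumed mapping: choose padding/output_padding so each stride-2
        # Conv2DTranspose exactly doubles the spatial resolution.
        if deconv_kernel == 4:
            padding, output_padding = 1, 0
        elif deconv_kernel == 3:
            padding, output_padding = 1, 1
        elif deconv_kernel == 2:
            padding, output_padding = 0, 0
        else:
            raise ValueError('unsupported deconv kernel %d' % deconv_kernel)
        return deconv_kernel, padding, output_padding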
Example #3
def ff(dim, depth):
    net = gluon.nn.Sequential()
    # Gluon's Dense does not accept 'swish' as an activation string, so the
    # Swish nonlinearity is attached as its own block after each Dense layer.
    net.add(gluon.nn.Dense(dim), gluon.nn.Swish())
    for _ in range(depth):
        net.add(gluon.nn.Dense(dim), gluon.nn.Swish())
    net.add(gluon.nn.Dense(1), gluon.nn.Swish())
    net.initialize(init=initializer.Normal())
    return net
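
A hedged usage sketch for ff (the batch size and input width are illustrative; parameters are shape-inferred on the first forward pass):

import mxnet as mx
from mxnet import gluon, initializer

net = ff(dim=64, depth=2)
x = mx.nd.random.uniform(shape=(8, 16))  # batch of 8, 16 input features
y = net(x)
print(y.shape)  # (8, 1)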
Example #4
    def __init__(self, num_anchors, **kwargs):
        super(RPNHead, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        with self.name_scope():
            self.rpn_conv = nn.Conv2D(
                channels=512,
                kernel_size=(3, 3),
                padding=(1, 1),
                activation='relu',
                weight_initializer=initializer.Normal(0.01))
            self.rpn_cls_score = nn.Conv2D(
                channels=2 * num_anchors,
                kernel_size=(1, 1),
                padding=(0, 0),
                weight_initializer=initializer.Normal(0.01))
            self.rpn_bbox_pred = nn.Conv2D(
                channels=4 * num_anchors,
                kernel_size=(1, 1),
                padding=(0, 0),
                weight_initializer=initializer.Normal(0.01))
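
The head's forward pass is not included in this excerpt; a minimal sketch of what an RPN head like this computes (the method name and return convention are assumptions):

    def hybrid_forward(self, F, x):
        # Shared 3x3 conv, then 1x1 heads for objectness scores and box deltas.
        x = self.rpn_conv(x)
        cls_score = self.rpn_cls_score(x)   # (N, 2*A, H, W)
        bbox_pred = self.rpn_bbox_pred(x)   # (N, 4*A, H, W)
        return cls_score, bbox_pred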
Example #5
def generate_initializer(init_dict):
    if init_dict is None:
        return init.Normal()
    init_type = init_dict['type']
    init_param = init_dict['init_config']

    # currently Uniform, Normal, Xavier, MSRAPrelu are supported
    if init_type == 'Uniform':
        scale = float(init_param['scale'])
        return init.Uniform(scale)
    if init_type == 'Normal':
        sigma = float(init_param['sigma'])
        return init.Normal(sigma)
    # Xavier
    if init_type == 'Xavier':
        magnitude = float(init_param['magnitude'])
        return init.Xavier(magnitude=magnitude)
    # PReLU
    if init_type == 'MSRAPrelu':
        slope = float(init_param['slope'])
        return init.MSRAPrelu(factor_type='avg', slope=slope)
    raise ValueError('unsupported initializer type: %s' % init_type)
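
A hedged usage sketch; the dict layout follows the keys the function reads above:

from mxnet import init

normal_cfg = {'type': 'Normal', 'init_config': {'sigma': 0.01}}
normal_init = generate_initializer(normal_cfg)   # init.Normal(0.01)

xavier_cfg = {'type': 'Xavier', 'init_config': {'magnitude': 2.0}}
xavier_init = generate_initializer(xavier_cfg)   # init.Xavier(magnitude=2.0)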
Example #6
    def __init__(self,
                 fixed_size=True,
                 base_name='resnet50_v1b',
                 pretrained_base=False,
                 pretrained_ctx=cpu(),
                 num_joints=17,
                 num_deconv_layers=3,
                 num_deconv_filters=(256, 256, 256),
                 num_deconv_kernels=(4, 4, 4),
                 final_conv_kernel=1,
                 deconv_with_bias=False,
                 in_channels=3,
                 in_size=(256, 192),
                 **kwargs):
        super(SimplePoseResNet, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size

        from gluoncv.model_zoo import get_model
        base_network = get_model(base_name,
                                 pretrained=pretrained_base,
                                 ctx=pretrained_ctx,
                                 norm_layer=gcv.nn.BatchNormCudnnOff)

        self.resnet = nn.HybridSequential()
        if base_name.endswith('v1'):
            for layer in ['features']:
                self.resnet.add(getattr(base_network, layer))
        else:
            for layer in [
                    'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2',
                    'layer3', 'layer4'
            ]:
                self.resnet.add(getattr(base_network, layer))

        self.deconv_with_bias = deconv_with_bias

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            num_deconv_layers,
            num_deconv_filters,
            num_deconv_kernels,
        )

        self.final_layer = nn.Conv2D(
            channels=num_joints,
            kernel_size=final_conv_kernel,
            strides=1,
            padding=1 if final_conv_kernel == 3 else 0,
            weight_initializer=initializer.Normal(0.001),
            bias_initializer=initializer.Zero())
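
The forward pass is not included in this excerpt; a minimal sketch of the usual flow for this architecture (backbone, deconv stack, 1x1 head), assuming the standard HybridBlock convention:

    def hybrid_forward(self, F, x):
        x = self.resnet(x)          # stride-32 backbone features
        x = self.deconv_layers(x)   # three 2x upsamples -> stride 4
        return self.final_layer(x)  # (N, num_joints, H/4, W/4) heatmaps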
Example #7
    def __init__(self, ctx=mx.cpu(), pretrained=True, **kwargs):
        super(FastPose_SE, self).__init__()

        self.preact = SEResnet('resnet101', norm_layer=norm_layer, **kwargs)
        self.reload_base()

        self.shuffle1 = PixelShuffle(2)
        self.duc1 = DUC(1024,
                        upscale_factor=2,
                        norm_layer=norm_layer,
                        **kwargs)
        self.duc2 = DUC(512, upscale_factor=2, norm_layer=norm_layer, **kwargs)

        self.conv_out = nn.Conv2D(channels=opt.nClasses,
                                  kernel_size=3,
                                  strides=1,
                                  padding=1,
                                  weight_initializer=initializer.Normal(0.001),
                                  bias_initializer=initializer.Zero())
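
FastPose's forward pass is not shown above; a minimal sketch of the DUC-style decoding path implied by the layers defined in __init__ (the method name and ordering are assumptions):

    def forward(self, x):
        out = self.preact(x)       # SE-ResNet backbone features
        out = self.shuffle1(out)   # PixelShuffle: 2x spatial, channels / 4
        out = self.duc1(out)       # dense upsampling convolutions
        out = self.duc2(out)
        return self.conv_out(out)  # per-joint heatmaps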
Example #8
def build_initializer(type, kerasDefaults, constant=0.):
    
    if type == 'constant':
        return initializer.Constant(constant)
    
    elif type == 'uniform':
        return initializer.Uniform(scale=kerasDefaults['maxval_uniform'])

    elif type == 'normal':
        return initializer.Normal(sigma=kerasDefaults['stddev_normal'])

    elif type == 'glorot_uniform':
        return initializer.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3.)

    elif type == 'lecun_uniform':
        return initializer.Xavier(rnd_type='uniform', factor_type='in', magnitude=3.)

    elif type == 'he_normal':
        return initializer.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2.)

    else:
        raise ValueError('unsupported initializer type: %s' % type)
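
A hedged usage sketch; the kerasDefaults values below mirror Keras's documented defaults, but are assumptions in this context:

kerasDefaults = {'maxval_uniform': 0.05, 'stddev_normal': 0.05}

w_init = build_initializer('he_normal', kerasDefaults)
b_init = build_initializer('constant', kerasDefaults, constant=0.1)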
Example #9
    def __init__(self,
                 base_name,
                 base_attrs=('features', ),
                 num_joints=17,
                 fixed_size=True,
                 pretrained_base=False,
                 pretrained_ctx=cpu(),
                 in_channels=3,
                 in_size=(256, 192),
                 **kwargs):
        super(MobilePose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size

        with self.name_scope():
            from gluoncv.model_zoo import get_model
            base_model = get_model(base_name,
                                   pretrained=pretrained_base,
                                   ctx=pretrained_ctx)
            self.features = nn.HybridSequential()
            if base_name.startswith('mobilenetv2'):
                self.features.add(base_model.features[:-1])
            elif base_name.startswith('mobilenetv3'):
                self.features.add(base_model.features[:-4])
            elif base_name.startswith('mobilenet'):
                self.features.add(base_model.features[:-2])
            else:
                for layer in base_attrs:
                    self.features.add(getattr(base_model, layer))

            self.upsampling = nn.HybridSequential()
            self.upsampling.add(
                nn.Conv2D(256, 1, 1, 0, use_bias=False),
                DUC(512, 2),
                DUC(256, 2),
                DUC(128, 2),
                nn.Conv2D(num_joints,
                          1,
                          use_bias=False,
                          weight_initializer=initializer.Normal(0.001)),
            )
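
A minimal sketch of the corresponding forward pass (an assumption based on the two blocks defined above):

    def hybrid_forward(self, F, x):
        x = self.features(x)    # backbone features
        x = self.upsampling(x)  # 1x1 conv + three DUC 2x upsamples + head
        return x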
Example #10
    def __init__(self, base_name='resnet50_v1b',
                 pretrained_base=False, pretrained_ctx=cpu(),
                 num_joints=19,
                 num_deconv_layers=3,
                 num_deconv_filters=(256, 256, 256),
                 num_deconv_kernels=(4, 4, 4),
                 final_conv_kernel=1, deconv_with_bias=False, **kwargs):
        super(MultiPoseResNet, self).__init__(**kwargs)

        base_network = get_model(base_name, pretrained=pretrained_base, ctx=pretrained_ctx)

        self.resnet = nn.HybridSequential()
        if base_name.endswith('v1'):
            for layer in ['features']:
                self.resnet.add(getattr(base_network, layer))
        else:
            for layer in ['conv1', 'bn1', 'relu', 'maxpool',
                          'layer1', 'layer2', 'layer3', 'layer4']:
                self.resnet.add(getattr(base_network, layer))

        self.deconv_with_bias = deconv_with_bias

        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            num_deconv_layers,
            num_deconv_filters,
            num_deconv_kernels,
        )

        self.final_layer = nn.Conv2D(
            channels=num_joints*3,
            kernel_size=final_conv_kernel,
            strides=1,
            prefix='final_',
            padding=1 if final_conv_kernel == 3 else 0,
            weight_initializer=initializer.Normal(0.001),
            bias_initializer=initializer.Zero()
        )
Example #11
@use_np
@with_environment('MXNET_ENGINE_TYPE', 'NaiveEngine')
def test_18934_empty_leaky_relu():
    arr = np.random.rand(0,2)
    arr_grad = np.empty_like(arr)

    autograd.mark_variables([arr], [arr_grad])
    with autograd.record():
        res = npx.leaky_relu(arr)
    res.backward()

@use_np
@pytest.mark.parametrize('initializer',[
    'zeros', 'ones', initializer.Constant(3),
    initializer.Uniform(),
    initializer.Normal(),
    initializer.Orthogonal(),
    initializer.Orthogonal(rand_type='normal'),
    initializer.Xavier(),
    initializer.Xavier(rnd_type='gaussian'),
    initializer.MSRAPrelu(),
    initializer.MSRAPrelu(factor_type='in'),
    initializer.MSRAPrelu(factor_type='out'),
    initializer.LSTMBias(),
])
@pytest.mark.parametrize('dtype', [
    'float32', 'float64'
])
def test_19118(initializer, dtype):
    net = gluon.nn.Dense(16, in_units=16)
    net.cast(dtype)
    net.initialize(initializer)
    net(np.zeros((16, 16), dtype=dtype))
Example #12
    def __init__(self,
                 basenetwork='resnet50_v2',
                 pretrained=True,
                 feature_channels=512,
                 classes=751,
                 laststride=2,
                 withpcb=True,
                 partnum=6,
                 feature_weight_share=False,
                 withrpp=True,
                 **kwargs):

        super(PCBRPPNet, self).__init__(**kwargs)
        basenetwork = eval(basenetwork)
        self.withpcb = withpcb
        self.withrpp = withrpp
        if self.withrpp and not self.withpcb:
            raise ValueError('If withrpp is True, withpcb must be True.')
        self.feature_weight_share = feature_weight_share
        self.partnum = partnum

        self.conv = basenetwork(pretrained=pretrained,
                                laststride=laststride,
                                ctx=cpu())
        if not pretrained:
            self.conv.collect_params().initialize(init=init.Xavier(),
                                                  ctx=cpu())

        self.pool = nn.GlobalAvgPool2D()
        self.dropout = nn.Dropout(rate=0.5)

        if not self.withpcb or self.feature_weight_share:
            self.feature = nn.HybridSequential(prefix='')
            with self.feature.name_scope():
                self.feature.add(
                    nn.Dense(feature_channels,
                             activation=None,
                             use_bias=False,
                             flatten=True))
                self.feature.add(nn.BatchNorm())
                self.feature.add(nn.LeakyReLU(alpha=0.1))
            self.feature.hybridize()
            self.classifier = nn.Dense(classes, use_bias=False)
            self.feature.collect_params().initialize(init=init.Xavier(),
                                                     ctx=cpu())
            self.classifier.collect_params().initialize(
                init=init.Normal(0.001), ctx=cpu())
        else:
            for pn in range(self.partnum):
                tmp_feature = nn.Dense(feature_channels,
                                       activation=None,
                                       use_bias=False,
                                       flatten=True)
                tmp_classifier = nn.Dense(classes, use_bias=False)
                tmp_feature.collect_params().initialize(init=init.Xavier(),
                                                        ctx=cpu())
                tmp_classifier.collect_params().initialize(
                    init=init.Normal(0.001), ctx=cpu())
                setattr(self, 'feature%d' % (pn + 1), tmp_feature)
                setattr(self, 'classifier%d' % (pn + 1), tmp_classifier)

        if self.withrpp:
            # from ..init.rppinit import RPP_Init
            # rpp_init = RPP_Init(mean=0.0, sigma=0.001)
            self.rppscore = nn.Conv2D(self.partnum,
                                      kernel_size=1,
                                      use_bias=False)
            self.rppscore.collect_params().initialize(init=init.One(),
                                                      ctx=cpu())
Example #13
def get_gradient(crit, real, fake, epsilon):
    # Score an interpolation of real and fake images, then take the gradient
    # of the critic's scores with respect to the mixed input.
    mixed_images = real * epsilon + fake * (1. - epsilon)
    mixed_images.attach_grad()
    with autograd.record():
        mixed_scores = crit(mixed_images)
    grad = autograd.grad(heads=mixed_scores,
                         variables=mixed_images,
                         create_graph=True,
                         retain_graph=True,
                         head_grads=nd.ones_like(mixed_scores))[0]
    return grad


def gradient_penalty(gradient):
    gradient = gradient.reshape(gradient.shape[0], -1)
    gradient_norm = nd.norm(gradient, ord=2, axis=1)
    penalty = nd.mean((gradient_norm - 1) ** 2)
    return penalty
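
A hedged sketch of how the two helpers above typically combine into the WGAN-GP critic loss; c_lambda is an assumed hyperparameter name, and crit, real, and fake come from the surrounding script:

epsilon = nd.random.uniform(shape=(real.shape[0], 1, 1, 1), ctx=mx_ctx[0])
grad = get_gradient(crit, real, fake, epsilon)
gp = gradient_penalty(grad)
# Critic wants real scores high and fake scores low, plus the penalty term.
crit_loss = nd.mean(crit(fake)) - nd.mean(crit(real)) + c_lambda * gp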


# %%
# -- Initialize parameters

gen = Generator(z_dim=Z_DIM)
gen.initialize(init=initializer.Normal(0.02), ctx=mx_ctx)
# %%

crit = Critic()
crit.initialize(init=initializer.Normal(0.02), ctx=mx_ctx)

# %%
# -- Print summary before hybridizing

z = nd.random.randn(1, Z_DIM, 1, 1, ctx=mx_ctx[0])
print(gen)
gen.summary(z)
# %%

xhat = nd.random.randn(1, 1, 28, 28, ctx=mx_ctx[0])
print(crit)
crit.summary(xhat)
Example #14
# W = nd.random.normal(scale=1, shape=(num_input, num_outputs))
# b = nd.zeros(shape=num_outputs)

# W.attach_grad()
# b.attach_grad()

# Define the model, the softmax classifier, and the loss function
def softmax(X):
    x_exp = X.exp()
    partition = x_exp.sum(axis=1, keepdims=True)
    return x_exp / partition

net = nn.Sequential()
net.add(nn.Dense(10))  # the output layer has 10 classes
net.initialize(init.Normal(sigma=0.01))

def cross_entropy(y_hat, y):
    return -nd.pick(y_hat, y).log()

def accuracy(y_hat, y):
    return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()

num_epoch = 5
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})

# Write the training function out explicitly here, instead of calling
# d2l.train_ch3(net, train_iter, test_iter, cross_entropy, num_epoch, batch_size, [W, b], lr)
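
A hedged sketch of the expanded training loop the comment above refers to, assuming train_iter, test_iter, batch_size, and autograd are defined earlier in the script:

for epoch in range(num_epoch):
    train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
    for X, y in train_iter:
        with autograd.record():
            y_hat = net(X)
            l = cross_entropy(y_hat, y).sum()
        l.backward()
        trainer.step(batch_size)
        train_l_sum += l.asscalar()
        train_acc_sum += accuracy(y_hat, y) * y.shape[0]
        n += y.shape[0]
    test_acc = sum(accuracy(net(X), y) for X, y in test_iter) / len(test_iter)
    print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
          % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))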


Example #15
            nn.BatchNorm(), nn.Activation('relu'),
            nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False), nn.BatchNorm(),
            nn.Activation('relu'),
            nn.Conv2DTranspose(nchannels,
                               4,
                               2,
                               1,
                               use_bias=False,
                               activation='tanh'))

    def forward(self, input):
        return self.main(input)


netG = Generator()
netG.initialize(init=initializer.Normal(0.02), ctx=ctx)
# print(netG)


class Discriminator(nn.Block):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential()
        self.main.add(
            nn.Conv2D(ndf, 4, 2, 1, use_bias=False), nn.LeakyReLU(0.2),
            nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False), nn.BatchNorm(),
            nn.LeakyReLU(0.2), nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False),
            nn.BatchNorm(), nn.LeakyReLU(0.2),
            nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False), nn.BatchNorm(),
            nn.LeakyReLU(0.2),
            nn.Conv2D(1, 4, 1, 0, use_bias=False, activation='sigmoid'))
Example #16
                          use_bias=False),
                nn.BatchNorm(in_channels=output_dim), nn.LeakyReLU(alpha=0.2))
        else:
            layer.add(
                nn.Conv2D(in_channels=input_dim,
                          channels=output_dim,
                          kernel_size=kernel_size,
                          strides=strides))
        return layer


# %%
# -- Initialize parameters

gen = Generator(z_dim=Z_DIM)
gen.initialize(init=initializer.Normal(0.02), ctx=mx_ctx)
# %%

disc = Discriminator()
disc.initialize(init=initializer.Normal(0.02), ctx=mx_ctx)

# %%
# -- Print summary before hybridizing

z = nd.random.randn(1, Z_DIM, 1, 1, ctx=mx_ctx[0])
print(gen)
gen.summary(z)
# %%

xhat = nd.random.randn(1, 1, 28, 28, ctx=mx_ctx[0])
print(disc)
disc.summary(xhat)