def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
    assert num_layers == len(num_filters), \
        'ERROR: num_deconv_layers is different from len(num_deconv_filters)'
    assert num_layers == len(num_kernels), \
        'ERROR: num_deconv_layers is different from len(num_deconv_kernels)'

    layer = nn.HybridSequential(prefix='final_')
    with layer.name_scope():
        for i in range(num_layers):
            # Look up kernel size, padding and output padding for this stage.
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i])

            planes = num_filters[i]
            # Each stride-2 transposed convolution doubles the spatial resolution.
            layer.add(
                nn.Conv2DTranspose(
                    channels=planes,
                    kernel_size=kernel,
                    strides=2,
                    padding=padding,
                    output_padding=output_padding,
                    use_bias=self.deconv_with_bias,
                    weight_initializer=initializer.Normal(0.001),
                    bias_initializer=initializer.Zero()))
            layer.add(nn.BatchNorm(gamma_initializer=initializer.One(),
                                   beta_initializer=initializer.Zero()))
            layer.add(nn.Activation('relu'))
            self.inplanes = planes

    return layer
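# A minimal sketch (not part of the original file) showing why strides=2 is the
# upsampling knob here: with kernel_size=4, padding=1, output_padding=0, each
# Conv2DTranspose stage exactly doubles the feature-map resolution. Shapes are
# assumed for illustration.
import mxnet as mx
from mxnet.gluon import nn

deconv = nn.Conv2DTranspose(channels=256, kernel_size=4, strides=2,
                            padding=1, output_padding=0, use_bias=False)
deconv.initialize()
x = mx.nd.zeros((1, 2048, 8, 8))   # e.g. a ResNet stage-5 feature map
print(deconv(x).shape)             # (1, 256, 16, 16)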
def __init__(self, planes, upscale_factor=2, **kwargs):
    super(DUC, self).__init__(**kwargs)
    # 3x3 conv produces the extra channels that PixelShuffle trades for resolution.
    self.conv = nn.Conv2D(planes, kernel_size=3, padding=1, use_bias=False)
    self.bn = gcv.nn.BatchNormCudnnOff(gamma_initializer=initializer.One(),
                                       beta_initializer=initializer.Zero())
    self.relu = nn.Activation('relu')
    # Rearranges (N, C*r^2, H, W) into (N, C, H*r, W*r) for dense upsampling.
    self.pixel_shuffle = contrib.nn.PixelShuffle2D(upscale_factor)
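# A short sketch (assumed channel counts, not from the original file) of the
# shape arithmetic inside this DUC block: the 3x3 conv emits `planes` channels,
# and PixelShuffle2D(2) then folds each group of 2*2 channels into a 2x larger map.
import mxnet as mx
from mxnet.gluon import nn, contrib

conv = nn.Conv2D(256, kernel_size=3, padding=1, use_bias=False)
shuffle = contrib.nn.PixelShuffle2D(2)
conv.initialize()
x = mx.nd.zeros((1, 512, 16, 16))
y = shuffle(conv(x))
print(y.shape)  # (1, 64, 32, 32): 256 channels / 2^2 = 64, resolution doubled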
def __init__(self, basenetwork='resnet50_v2', pretrained=True,
             feature_channels=512, classes=751, laststride=2,
             withpcb=True, partnum=6, feature_weight_share=False,
             withrpp=True, **kwargs):
    super(PCBRPPNet, self).__init__(**kwargs)
    # Resolve the backbone constructor from its name (e.g. 'resnet50_v2').
    basenetwork = eval(basenetwork)
    self.withpcb = withpcb
    self.withrpp = withrpp
    if self.withrpp and not self.withpcb:
        raise ValueError('If withrpp is True, withpcb must be True.')
    self.feature_weight_share = feature_weight_share
    self.partnum = partnum

    self.conv = basenetwork(pretrained=pretrained, laststride=laststride,
                            ctx=cpu())
    if not pretrained:
        self.conv.collect_params().initialize(init=init.Xavier(), ctx=cpu())
    self.pool = nn.GlobalAvgPool2D()
    self.dropout = nn.Dropout(rate=0.5)

    if not self.withpcb or self.feature_weight_share:
        # A single embedding head, either for the global feature or shared
        # across all parts when feature_weight_share is enabled.
        self.feature = nn.HybridSequential(prefix='')
        with self.feature.name_scope():
            self.feature.add(
                nn.Dense(feature_channels, activation=None,
                         use_bias=False, flatten=True))
            self.feature.add(nn.BatchNorm())
            self.feature.add(nn.LeakyReLU(alpha=0.1))
        self.feature.hybridize()
        self.classifier = nn.Dense(classes, use_bias=False)
        self.feature.collect_params().initialize(init=init.Xavier(), ctx=cpu())
        self.classifier.collect_params().initialize(
            init=init.Normal(0.001), ctx=cpu())
    else:
        # One embedding head and one classifier per body part.
        for pn in range(self.partnum):
            tmp_feature = nn.Dense(feature_channels, activation=None,
                                   use_bias=False, flatten=True)
            tmp_classifier = nn.Dense(classes, use_bias=False)
            tmp_feature.collect_params().initialize(init=init.Xavier(),
                                                    ctx=cpu())
            tmp_classifier.collect_params().initialize(
                init=init.Normal(0.001), ctx=cpu())
            setattr(self, 'feature%d' % (pn + 1), tmp_feature)
            setattr(self, 'classifier%d' % (pn + 1), tmp_classifier)

    if self.withrpp:
        # from ..init.rppinit import RPP_Init
        # rpp_init = RPP_Init(mean=0.0, sigma=0.001)
        # 1x1 conv scoring each spatial location against the partnum parts
        # (Refined Part Pooling); initialized to ones, i.e. uniform assignment.
        self.rppscore = nn.Conv2D(self.partnum, kernel_size=1, use_bias=False)
        self.rppscore.collect_params().initialize(init=init.One(), ctx=cpu())
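# A hedged usage sketch (not from the original repo): constructing the network
# and running a dummy batch. It assumes PCBRPPNet and a `resnet50_v2` backbone
# accepting `laststride` are importable from this repo, and that the forward
# pass returns per-part ID predictions; the exact outputs depend on
# hybrid_forward, which is not shown here.
import mxnet as mx

net = PCBRPPNet(basenetwork='resnet50_v2', pretrained=False,
                feature_channels=512, classes=751,
                withpcb=True, partnum=6, withrpp=True)
x = mx.nd.zeros((2, 3, 384, 128))  # 384x128 is the standard PCB input size
outputs = net(x)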