def _mask_layer(self):
    """Build the ASPP head: a 1x1 branch, dilated 3x3 branches, and an image-pooling branch."""
    # 1x1 projection branch first, then one dilated 3x3 branch per dilation rate.
    branches = [
        make_conv(self.dim_in, self.aspp_dim, kernel_size=1,
                  norm=make_norm(self.aspp_dim, norm=self.norm),
                  act=make_act(act=self.act))
    ]
    branches.extend(
        make_conv(self.dim_in, self.aspp_dim, kernel_size=3, dilation=rate, conv=self.conv,
                  norm=make_norm(self.aspp_dim, norm=self.norm),
                  act=make_act(act=self.act))
        for rate in self.dilations
    )
    self.aspp = nn.ModuleList(branches)
    # Global-context branch: image-level average pooling followed by a 1x1 projection.
    self.im_pool = nn.Sequential(
        nn.AdaptiveAvgPool2d(1),
        make_conv(self.dim_in, self.aspp_dim, kernel_size=1,
                  norm=make_norm(self.aspp_dim, norm=self.norm),
                  act=make_act(act=self.act)),
    )
def _make_layer(self, dim_in, fpn_dim, norm, act=""):
    """Construct the FPN lateral/output convs and any extra pyramid levels.

    Args:
        dim_in: per-stage backbone channel counts (indexed from the bottom).
        fpn_dim: channel width of every FPN level.
        norm: norm spec forwarded to make_norm.
        act: activation spec forwarded to make_act.
    """
    # Topmost level (P5): 1x1 lateral conv into the FPN width, 3x3 output conv.
    self.p5_in = make_conv(self.dim_in, fpn_dim, kernel_size=1,
                           norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act))
    self.p5_out = make_conv(fpn_dim, fpn_dim, kernel_size=3,
                            norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act))

    # Lateral (1x1) and output (3x3) convs for the remaining stages, built
    # top-down; the top backbone stage itself is skipped (it is P5 above).
    lateral_convs = []
    output_convs = []
    for stage in range(self.num_backbone_stages - 1):
        lateral_convs.append(
            make_conv(dim_in[-stage - 2], fpn_dim, kernel_size=1,
                      norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act)))
        output_convs.append(
            make_conv(fpn_dim, fpn_dim, kernel_size=3,
                      norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act)))
    self.fpn_in = nn.ModuleList(lateral_convs)   # [P4, P3, P2]
    self.fpn_out = nn.ModuleList(output_convs)
    self.dim_in = fpn_dim

    # P6 as a stride-2 max-pool of P5 when no extra conv levels are requested.
    if not self.extra_conv and self.max_level == self.highest_bk_lvl + 1:
        self.maxpool_p6 = nn.MaxPool2d(kernel_size=1, stride=2, padding=0)
        self.spatial_scale.append(self.spatial_scale[-1] * 0.5)

    # Extra stride-2 conv levels above the highest backbone level (e.g. P6/P7).
    if self.extra_conv and self.max_level > self.highest_bk_lvl:
        self.extra_pyramid_modules = nn.ModuleList()
        if self.use_c5:
            # Feed the first extra level from the raw top backbone stage (C5)
            # instead of the fused P5 features.
            self.dim_in = dim_in[-1]
        for _ in range(self.highest_bk_lvl + 1, self.max_level + 1):
            self.extra_pyramid_modules.append(
                make_conv(self.dim_in, fpn_dim, kernel_size=3, stride=2,
                          norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act)))
            self.dim_in = fpn_dim
            self.spatial_scale.append(self.spatial_scale[-1] * 0.5)
def _mask_layer(self):
    """Build the pyramid pooling (PPM) branches, one per pooling scale."""
    # NOTE(review): make_act() is called with no act= argument here, unlike the
    # ASPP builder which passes act=self.act — presumably the helper's default
    # activation is intended for PPM; confirm before changing.
    branches = [
        nn.Sequential(
            nn.AdaptiveAvgPool2d(pool_scale),
            make_conv(self.dim_in, self.ppm_dim, kernel_size=1,
                      norm=make_norm(self.ppm_dim, norm=self.norm),
                      act=make_act()),
        )
        for pool_scale in self.pool_scales
    ]
    self.ppm = nn.ModuleList(branches)
def __init__(self, inplanes, planes, base_width=64, cardinality=1, stride=1, dilation=1, radix=1,
             downsample=None, stride_3x3=False, conv='Conv2d', norm='BN', ctx=''):
    """Residual basic block: two 3x3 convs with norms, plus an optional context module.

    cardinality / radix / stride_3x3 are accepted for signature parity with the
    bottleneck variant but are not used by this block.
    """
    super(BasicBlock, self).__init__()
    width = int(planes * (base_width / 64.))
    # an_k is forwarded to make_norm; smaller value for narrower blocks.
    an_k = 10 if planes < 256 else 20
    self.conv1 = make_conv(inplanes, width, kernel_size=3, stride=stride,
                           dilation=dilation, padding=dilation, bias=False, conv=conv)
    self.bn1 = make_norm(width, norm=norm, an_k=an_k)
    self.conv2 = make_conv(width, width, kernel_size=3, stride=1,
                           dilation=dilation, padding=dilation, bias=False, conv=conv)
    self.bn2 = make_norm(width, norm=norm, an_k=an_k)
    # Context module with a fixed 1/16 channel reduction ratio.
    self.ctx = make_ctx(width, int(width * 0.0625), ctx=ctx)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
def __init__(self, inplanes, planes, base_width=64, cardinality=1, stride=1, dilation=1, radix=1,
             downsample=None, stride_3x3=False, conv='Conv2d', norm='BN', ctx=''):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with optional split-attention 3x3."""
    super(Bottleneck, self).__init__()
    # Put the spatial stride on the 3x3 conv or on the first 1x1 conv.
    if stride_3x3:
        str1x1, str3x3 = 1, stride
    else:
        str1x1, str3x3 = stride, 1
    D = int(math.floor(planes * (base_width / 64.0)))  # per-group width
    C = cardinality
    self.radix = radix

    self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, stride=str1x1, padding=0, bias=False)
    # 'Mix' is stripped from the norm spec for the 1x1 convs' norms.
    self.bn1 = make_norm(D * C, norm=norm.replace('Mix', ''))

    # With split-attention and a strided/dilated 3x3, downsample through an
    # average pool and run the 3x3 unstrided.
    if radix > 1 and (str3x3 > 1 or dilation > 1):
        self.avd_layer = nn.AvgPool2d(3, str3x3, padding=1)
        str3x3 = 1
    else:
        self.avd_layer = None

    if radix > 1:
        self.conv2 = SplAtConv2d(D * C, D * C, kernel_size=3, stride=str3x3, padding=dilation,
                                 dilation=dilation, groups=C, bias=False, radix=radix,
                                 conv_op=get_conv_op(conv=conv), norm_op=get_norm_op(norm=norm))
    else:
        self.conv2 = make_conv(D * C, D * C, kernel_size=3, stride=str3x3, padding=dilation,
                               dilation=dilation, groups=C, bias=False, conv=conv)
    self.bn2 = make_norm(D * C, norm=norm, an_k=10 if planes < 256 else 20)

    self.conv3 = nn.Conv2d(D * C, planes * self.expansion, kernel_size=1, stride=1, padding=0,
                           bias=False)
    self.bn3 = make_norm(planes * self.expansion, norm=norm.replace('Mix', ''))
    # Context module with a fixed 1/16 channel reduction ratio.
    self.ctx = make_ctx(planes * self.expansion, int(planes * self.expansion * 0.0625), ctx=ctx)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample