def __init__(self, fbody_cls):
    """Build the full-body attention decoder head.

    A refinement stack (3x3 then 1x1 conv, each BN+ReLU, ending in
    channel-wise SE recalibration) plus a learnable scalar blend weight.

    NOTE(review): ``fbody_cls`` is accepted but never used in this
    constructor — confirm whether a classifier layer was intended here.
    """
    super(AlphaFBDecoder, self).__init__()
    refine_layers = [
        nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        SEModule(256, reduction=16),
    ]
    self.conv1 = nn.Sequential(*refine_layers)
    # Learnable scalar weight for blending the full-body branch.
    self.alpha_fb = nn.Parameter(torch.ones(1))
def __init__(self, fbody_cls):
    """Build the full-body decoder.

    ``conv0`` projects a 256-channel skip feature down to 48 channels;
    ``conv1`` fuses the concatenated 304-channel tensor (256 + 48 —
    presumably skip + upsampled deep features; confirm in forward())
    back to 256; ``project`` maps to ``fbody_cls`` output channels.
    """
    super(FBodyDecoder, self).__init__()
    # Low-level feature reduction: 256 -> 48 channels.
    self.conv0 = nn.Sequential(
        nn.Conv2d(256, 48, kernel_size=1, stride=1, padding=0, dilation=1, bias=False),
        BatchNorm2d(48),
        nn.ReLU(inplace=False),
    )
    # Fusion stack over the 304-channel concatenation, with SE at the end.
    fusion = [
        nn.Conv2d(304, 256, kernel_size=3, padding=1, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        SEModule(256, reduction=16),
    ]
    self.conv1 = nn.Sequential(*fusion)
    # Per-pixel classifier over the fused features.
    self.project = nn.Conv2d(256, fbody_cls, kernel_size=1, padding=0, stride=1, bias=True)
def __init__(self, num_classes):
    """Build the decoder head: refinement stack, classifier, and a
    learnable blend weight.

    NOTE(review): another ``DecoderModule.__init__`` with a different
    signature appears elsewhere in this source — likely two versions
    pasted together; confirm which one the project actually uses.
    """
    super(DecoderModule, self).__init__()
    # 3x3 then 1x1 conv (BN + ReLU after each), ending in SE recalibration.
    stack = [
        nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        SEModule(256, reduction=16),
    ]
    self.conv1 = nn.Sequential(*stack)
    # Per-pixel classification layer.
    self.conv2 = nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True)
    # Learnable scalar blending weight.
    self.alpha = nn.Parameter(torch.ones(1))
def __init__(self, hbody_cls):
    """Build the half-body decoder head.

    Unlike the sibling decoders, the classifier conv is folded into the
    same Sequential as the refinement stack.
    """
    super(AlphaHBDecoder, self).__init__()
    head = [
        nn.Conv2d(512, 256, kernel_size=3, padding=1, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        nn.Conv2d(256, 256, kernel_size=1, padding=0, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        SEModule(256, reduction=16),
        # Final classifier over the half-body classes.
        nn.Conv2d(256, hbody_cls, kernel_size=1, padding=0, stride=1, bias=True),
    ]
    self.conv1 = nn.Sequential(*head)
def __init__(self, in_dim, out_dim, scale):
    """Build an SE-augmented ASPP: a self-attention branch plus four
    dilated branches (rates 1, 12, 24, 36), fused by ``head_conv``.

    NOTE(review): the attention-branch conv uses ``bias=True`` even
    though InPlaceABNSync follows (bias is then redundant) — left as-is
    to keep checkpoint compatibility; confirm if intentional.
    """
    super(SEASPPModule, self).__init__()

    # Self-attention branch: 3x3 conv + ABN, then spatial self-attention.
    self.atte_branch = nn.Sequential(
        nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1, dilation=1, bias=True),
        InPlaceABNSync(out_dim),
        SelfAttentionModule(in_dim=out_dim, out_dim=out_dim,
                            key_dim=out_dim // 2, value_dim=out_dim, scale=scale),
    )

    def _dilated_branch(rate):
        # rate == 1 uses a plain 1x1 conv; larger rates use a dilated 3x3.
        if rate == 1:
            conv = nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0,
                             dilation=1, bias=False)
        else:
            conv = nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=rate,
                             dilation=rate, bias=False)
        return nn.Sequential(conv, InPlaceABNSync(out_dim),
                             SEModule(out_dim, reduction=16))

    self.dilation_0 = _dilated_branch(1)
    self.dilation_1 = _dilated_branch(12)
    self.dilation_2 = _dilated_branch(24)
    self.dilation_3 = _dilated_branch(36)

    # Fuse the 5 concatenated branches (5 * out_dim channels) back to out_dim.
    self.head_conv = nn.Sequential(
        nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, padding=0, bias=False),
        InPlaceABNSync(out_dim),
        nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=1, padding=1, bias=False),
        InPlaceABNSync(out_dim),
    )
def __init__(self, in_dim, num_classes):
    """Build a single-Sequential decoder head: refinement stack with SE,
    followed directly by the per-pixel classifier.

    NOTE(review): a second ``DecoderModule.__init__`` with a different
    signature exists elsewhere in this source — confirm which definition
    the project actually imports.
    """
    super(DecoderModule, self).__init__()
    layers = [
        nn.Conv2d(in_dim, 256, kernel_size=3, padding=1, stride=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        nn.Conv2d(256, 256, kernel_size=3, padding=1, dilation=1, bias=False),
        BatchNorm2d(256),
        nn.ReLU(inplace=False),
        SEModule(256, reduction=16),
        # Classifier folded into the same Sequential.
        nn.Conv2d(256, num_classes, kernel_size=1, padding=0, dilation=1, bias=True),
    ]
    self.conv1 = nn.Sequential(*layers)
def __init__(self, in_dim, out_dim, scale=1):
    """Build an ASPP with global pooling, four SE-gated dilated branches
    (rates 1, 6, 12, 18), a PSAA gating head, and a final projection.

    NOTE(review): ``scale`` is accepted but unused in this constructor —
    confirm whether it was meant to feed an attention submodule.
    """
    super(ASPPModule, self).__init__()

    # Image-level context: global average pool -> 1x1 conv -> ABN.
    self.gap = nn.Sequential(
        nn.AdaptiveAvgPool2d(1),
        nn.Conv2d(in_dim, out_dim, 1, bias=False),
        InPlaceABNSync(out_dim),
    )

    def _branch(rate):
        # Every branch starts with a 1x1 reduction; rates > 1 add a
        # dilated 3x3 stage. All branches end in SE recalibration.
        layers = [
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
        ]
        if rate > 1:
            layers += [
                nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=rate,
                          dilation=rate, bias=False),
                InPlaceABNSync(out_dim),
            ]
        layers.append(SEModule(out_dim, reduction=16))
        return nn.Sequential(*layers)

    self.dilation_0 = _branch(1)
    self.dilation_1 = _branch(6)
    self.dilation_2 = _branch(12)
    self.dilation_3 = _branch(18)

    # PSAA head: predicts 5 sigmoid gates from input + all 5 branch outputs.
    self.psaa_conv = nn.Sequential(
        nn.Conv2d(in_dim + 5 * out_dim, out_dim, 1, padding=0, bias=False),
        InPlaceABNSync(out_dim),
        nn.Conv2d(out_dim, 5, 1, bias=True),
        nn.Sigmoid(),
    )

    # Fuse the 5 gated branches back down to out_dim channels.
    self.project = nn.Sequential(
        nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, padding=0, bias=False),
        InPlaceABNSync(out_dim),
    )
def __init__(self, in_dim, out_dim):
    """Build an ASPP variant with global pooling, four SE-gated dilated
    branches (rates 1, 6, 12, 18), a projection, and a position-attention
    module (``pam0``) applied downstream.
    """
    super(ASPPModule2, self).__init__()

    # Image-level context: global average pool -> 1x1 conv -> ABN.
    self.gap = nn.Sequential(
        nn.AdaptiveAvgPool2d(1),
        nn.Conv2d(in_dim, out_dim, 1, bias=False),
        InPlaceABNSync(out_dim),
    )

    def _branch(rate):
        # 1x1 reduction first; rates > 1 add a dilated 3x3 stage;
        # each branch ends with SE recalibration.
        layers = [
            nn.Conv2d(in_dim, out_dim, kernel_size=1, padding=0, dilation=1, bias=False),
            InPlaceABNSync(out_dim),
        ]
        if rate > 1:
            layers += [
                nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=rate,
                          dilation=rate, bias=False),
                InPlaceABNSync(out_dim),
            ]
        layers.append(SEModule(out_dim, reduction=16))
        return nn.Sequential(*layers)

    self.dilation_0 = _branch(1)
    self.dilation_1 = _branch(6)
    self.dilation_2 = _branch(12)
    self.dilation_3 = _branch(18)

    # Fuse the 5 concatenated branches back down to out_dim channels.
    self.project = nn.Sequential(
        nn.Conv2d(out_dim * 5, out_dim, kernel_size=1, padding=0, bias=False),
        InPlaceABNSync(out_dim),
    )

    # Position attention over the fused features (key dim reduced 8x).
    self.pam0 = PAM_Module(in_dim=out_dim, key_dim=out_dim // 8,
                           value_dim=out_dim, out_dim=out_dim)