def __init__(self, pretrained=True, num_norm=6):
    """Res101-SFCN variant with a BatchNorm dilated decoder.

    Args:
        pretrained: if True, load ImageNet weights for the ResNet-101 frontend.
        num_norm: forwarded to ``make_layers`` as ``num_norm`` — semantics are
            defined by the project's ``make_layers`` helper (TODO confirm).

    Fix: dropped the dead ``self.frontend = []`` assignment — it was
    immediately shadowed by the ``nn.Sequential`` below, and a plain list
    would never have been registered as a submodule anyway.
    """
    super(Res101_SFCN_BN, self).__init__()
    self.num_norm = num_norm
    # Decoder channel plan: 1024-ch ResNet layer2 features down to 64 channels.
    self.backend_feat = [512, 512, 512, 256, 128, 64]
    self.backend = make_layers(self.backend_feat, in_channels=1024,
                               dilation=True, batch_norm=True,
                               num_norm=self.num_norm)
    # Down-up then left-right recurrent spatial passes over the 64-ch map.
    self.convDU = convDU(in_out_channels=64, kernel_size=(1, 9))
    self.convLR = convLR(in_out_channels=64, kernel_size=(9, 1))
    # 1x1 projection to a single-channel (density) map.
    self.output_layer = nn.Sequential(nn.Conv2d(64, 1, kernel_size=1), nn.ReLU())
    # Initialize decoder weights BEFORE attaching the pretrained frontend,
    # so the pretrained ResNet weights are not clobbered.
    initialize_weights(self.modules())
    res = models.resnet101(pretrained=pretrained)
    self.frontend = nn.Sequential(
        res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2)
    # Re-built layer3 (stride=1 keeps spatial resolution), seeded from the
    # pretrained layer3 weights.
    self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 23, stride=1)
    self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
def __init__(self, norm=None, num_gbnnorm=6):
    """Density-map decoder: dilated conv backend + DULR spatial encoder.

    Args:
        norm: normalization flavor forwarded to ``make_layers``
            (semantics defined by the project helper — TODO confirm).
        num_gbnnorm: count forwarded to ``make_layers`` as ``num_gbnnorm``.
    """
    super(CrowdDecoder, self).__init__()
    self.norm = norm
    self.num_gbnnorm = num_gbnnorm

    # Channel plan for the dilated decoder, fed with 1024-ch features.
    decoder_channels = [512, 512, 512, 256, 128, 64]
    self.backend_feat = decoder_channels
    self.backend = make_layers(
        decoder_channels,
        in_channels=1024,
        dilation=True,
        norm=self.norm,
        num_gbnnorm=self.num_gbnnorm,
    )

    # 1x1 projection of the 64-ch map to a single-channel output.
    self.output_layer = nn.Sequential(
        nn.Conv2d(64, 1, kernel_size=1),
        nn.ReLU(),
    )
    # Down-up then left-right recurrent spatial context passes.
    self.convDU = convDU(in_out_channels=64, kernel_size=(1, 9))
    self.convLR = convLR(in_out_channels=64, kernel_size=(9, 1))

    initialize_weights(self.modules())
def __init__(self, pretrained=True):
    """SFCN with an EfficientNet-B7 stem as the frontend.

    Args:
        pretrained: if True, load ImageNet weights for the EfficientNet-B7
            backbone; if False, build it with random init.

    Fix: the original ignored ``pretrained`` and always called
    ``from_pretrained``; the flag is now honored (default behavior is
    unchanged).
    """
    super(EfficientNet_SFCN, self).__init__()
    self.seen = 0  # training-iteration counter (maintained by the trainer)
    # NOTE(review): assumes the lukemelas efficientnet_pytorch API, where
    # ``from_name`` builds the architecture without weights — confirm.
    if pretrained:
        self.res = EfficientNet.from_pretrained('efficientnet-b7')
    else:
        self.res = EfficientNet.from_name('efficientnet-b7')
    # Stem only: conv -> BN -> swish.
    self.frontend = nn.Sequential(self.res._conv_stem, self.res._bn0, self.res._swish)
    # Reduce 80-ch features to the 64-ch working width of the DULR encoder.
    self.convOut = nn.Sequential(nn.Conv2d(80, 64, kernel_size=1), nn.ReLU())
    self.convDU = convDU(in_out_channels=64, kernel_size=(1, 9))
    self.convLR = convLR(in_out_channels=64, kernel_size=(9, 1))
    # Final linear layer
    self.output_layer = nn.Sequential(nn.Conv2d(64, 1, kernel_size=1), nn.ReLU())
def __init__(self, bn=False, num_classes=10):
    """Multi-task crowd network: box classifier + density map + segmentation map.

    Args:
        bn: whether the project's ``Conv2d`` blocks insert batch norm.
        num_classes: output size of the box-prediction FC head.
    """
    super(ori, self).__init__()
    self.num_classes = num_classes

    # Shared low-level feature extractor (1-ch input, 32-ch output).
    self.base_layer = nn.Sequential(
        Conv2d(1, 16, 9, same_padding=True, NL='prelu', bn=bn),
        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))

    # High-level prior branch; two max-pools -> 1/4 resolution.
    self.hl_prior = nn.Sequential(
        Conv2d(32, 16, 9, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn),
        Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn))

    # RoI head: 16x16 pooled regions at 1/4 scale, projected to 16 ch,
    # then flattened (16*16*16) through the FC classifier.
    self.roi_pool = RoIPool([16, 16], 1 / 4.0)
    self.hl_prior_conv2d = Conv2d(32, 16, 1, same_padding=True, NL='prelu', bn=bn)
    self.bbx_pred = nn.Sequential(
        FC(16 * 16 * 16, 512, NL='prelu'),
        FC(512, 256, NL='prelu'),
        FC(256, self.num_classes, NL='prelu'))

    # generate dense map
    self.den_stage_1 = nn.Sequential(
        Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(32, 64, 5, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(64, 32, 5, same_padding=True, NL='prelu', bn=bn),
        Conv2d(32, 32, 5, same_padding=True, NL='prelu', bn=bn))
    # Down-up then left-right recurrent spatial context passes.
    self.den_stage_DULR = nn.Sequential(
        convDU(in_out_channels=32, kernel_size=(1, 9)),
        convLR(in_out_channels=32, kernel_size=(9, 1)))
    # Two stride-2 transposed convs upsample 4x back toward input resolution.
    # NOTE(review): den_stage_2 takes 64 ch — presumably a concat of two
    # 32-ch branches in forward(); confirm against the forward pass.
    self.den_stage_2 = nn.Sequential(
        Conv2d(64, 64, 3, same_padding=True, NL='prelu', bn=bn),
        Conv2d(64, 32, 3, same_padding=True, NL='prelu', bn=bn),
        nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU(),
        nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU())

    # generate seg map
    self.seg_stage = nn.Sequential(
        Conv2d(32, 32, 1, same_padding=True, NL='prelu', bn=bn),
        Conv2d(32, 64, 3, same_padding=True, NL='prelu', bn=bn),
        Conv2d(64, 32, 3, same_padding=True, NL='prelu', bn=bn),
        nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU(),
        nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU())

    # Prediction heads: 2-ch segmentation, 8-ch density transfer, 1-ch density.
    self.seg_pred = Conv2d(8, 2, 1, same_padding=True, NL='relu', bn=bn)
    self.trans_den = Conv2d(8, 8, 1, same_padding=True, NL='relu', bn=bn)
    self.den_pred = Conv2d(16, 1, 1, same_padding=True, NL='relu', bn=bn)

    # Regression path gets the project's normal init; the segmentation path
    # gets the generic initializer.
    weights_normal_init(self.base_layer, self.hl_prior, self.hl_prior_conv2d, self.bbx_pred, self.den_stage_1, \
        self.den_stage_DULR, self.den_stage_2, self.trans_den, self.den_pred)
    initialize_weights(self.seg_stage, self.seg_pred)