def __init__(self, pretrained=True):
    """VGG16 front-end plus a transposed-conv decoder producing a density map.

    Args:
        pretrained: load torchvision's ImageNet VGG16 weights into the front-end.
    """
    super(VGG_decoder, self).__init__()
    vgg = models.vgg16(pretrained=pretrained)
    backbone = list(vgg.features.children())
    # First 23 feature layers: up to and including conv4_3's ReLU (1/8 resolution).
    self.features4 = nn.Sequential(*backbone[0:23])
    # Decoder: 3x3 bottleneck conv, three stride-2 transposed convs (8x total
    # upsampling), then a 1x1 conv down to a single-channel density map.
    self.de_pred = nn.Sequential(
        Conv2d(512, 128, 3, same_padding=True, NL='relu'),
        nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.ReLU(),
        nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.ReLU(),
        nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.ReLU(),
        Conv2d(16, 1, 1, same_padding=True, NL='relu'))
def __init__(self):
    """VGG16 front-end with a small two-layer 1x1-conv density head.

    NOTE(review): this file contains a second ``VGG.__init__`` that takes a
    ``pretrained`` flag; if both live in the same class body, the later
    definition wins at class-creation time — confirm which one is intended.
    """
    super(VGG, self).__init__()
    vgg = models.vgg16(pretrained=True)
    backbone = list(vgg.features.children())
    # Layers 0-22 of VGG16: through conv4_3's ReLU (1/8 resolution).
    self.features4 = nn.Sequential(*backbone[0:23])
    self.de_pred = nn.Sequential(
        Conv2d(512, 128, 1, same_padding=True, NL='relu'),
        Conv2d(128, 1, 1, same_padding=True, NL='relu'))
def __init__(self, pretrained=True):
    """VGG16 front-end with a two-layer 1x1-conv density head.

    Args:
        pretrained: load torchvision's ImageNet VGG16 weights into the front-end.
    """
    super(VGG, self).__init__()
    vgg = models.vgg16(pretrained=pretrained)
    backbone = list(vgg.features.children())
    # Layers 0-22 of VGG16: through conv4_3's ReLU (1/8 resolution).
    self.features4 = nn.Sequential(*backbone[0:23])
    self.de_pred = nn.Sequential(
        Conv2d(512, 128, 1, same_padding=True, NL='relu'),
        Conv2d(128, 1, 1, same_padding=True, NL='relu'))
def __init__(self, pretrained=True):
    """ResNet-101 front-end (through a stride-1 rebuild of layer3) with a
    two-layer 1x1-conv density head.

    Fix/generalization: the original always executed ``torch.load(model_path)``
    with no way to opt out, unlike the sibling ``Res50`` which exposes a
    ``pretrained`` flag. The flag added here defaults to True, preserving the
    original behaviour for existing callers.

    Args:
        pretrained: if True (default), load the checkpoint at ``model_path``
            into the backbone and copy its layer3 weights into the rebuilt
            res-layer.
    """
    super(Res101, self).__init__()
    self.de_pred = nn.Sequential(
        Conv2d(1024, 128, 1, same_padding=True, NL='relu'),
        Conv2d(128, 1, 1, same_padding=True, NL='relu'))
    res = models.resnet101()
    if pretrained:
        res.load_state_dict(torch.load(model_path))
    # Backbone through layer2 (output stride 8: conv1/2, maxpool/2, layer2/2).
    self.frontend = nn.Sequential(
        res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2)
    # layer3 rebuilt with stride 1 (23 Bottleneck blocks) to keep 1/8 resolution.
    self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 23, stride=1)
    if pretrained:
        self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
def __init__(self, DA=False, pretrained=True):
    """ResNet-50 front-end (through a stride-1 rebuild of layer3) with two
    stacked prediction convs.

    Args:
        DA: stored on the instance; presumably a domain-adaptation switch used
            elsewhere in the class — confirm in forward().
        pretrained: load torchvision's ImageNet ResNet-50 weights.
    """
    super(Res50, self).__init__()
    self.de_pred1 = Conv2d(1024, 128, 1, same_padding=True, NL='relu')
    self.de_pred2 = Conv2d(128, 1, 1, same_padding=True, NL='relu')
    initialize_weights(self.modules())
    res = models.resnet50(pretrained=pretrained)
    # Backbone through layer2 (output stride 8).
    self.frontend = nn.Sequential(
        res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2)
    # layer3 rebuilt with stride 1 (6 Bottleneck blocks) to keep 1/8 resolution;
    # copy the pretrained layer3 weights only when they were actually loaded.
    self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 6, stride=1)
    if pretrained:
        self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
    self.DA = DA
def __init__(self, pretrained=True):
    """AlexNet front-end with conv1/conv2 re-created at larger paddings,
    followed by a two-layer 1x1-conv density head.

    The first two convs are rebuilt (padding 4 and 3 respectively — the
    original inline comments suggest torchvision's defaults are smaller) and
    the pretrained weights are copied in afterwards, so only padding differs.

    Args:
        pretrained: load torchvision's ImageNet AlexNet weights.
    """
    super(AlexNet, self).__init__()
    alex = models.alexnet(pretrained=pretrained)
    feats = list(alex.features.children())
    # Re-created conv1 with enlarged padding.
    self.layer1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=4)
    self.layer1plus = nn.Sequential(
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2))
    # Re-created conv2 with enlarged padding (original uses padding 2).
    self.layer2 = nn.Conv2d(64, 192, kernel_size=5, padding=3)
    # Remaining AlexNet feature layers (indices 4-11) reused as-is.
    self.layer2plus_to_5 = nn.Sequential(*feats[4:12])
    self.de_pred = nn.Sequential(
        Conv2d(256, 128, 1, same_padding=True, NL='relu'),
        Conv2d(128, 1, 1, same_padding=True, NL='relu'))
    # Copy pretrained conv1/conv2 parameters into the re-padded layers.
    self.layer1.load_state_dict(alex.features[0].state_dict())
    self.layer2.load_state_dict(alex.features[3].state_dict())
def __init__(self, pretrained=True):
    """Two-stage cascade over a shared VGG16 front-end, with two
    spatial-transformer-style localisation nets and affine regressors.

    Args:
        pretrained: load torchvision's ImageNet VGG16 weights.
    """
    super(CascadeCNN, self).__init__()
    vgg = models.vgg16(pretrained=pretrained)
    backbone = list(vgg.features.children())
    self.features4 = nn.Sequential(*backbone[0:23])
    # Stage-1 ("full") density head.
    self.de_pred_f = nn.Sequential(
        Conv2d(512, 128, 1, same_padding=True, NL='relu'),
        Conv2d(128, 1, 1, same_padding=True, NL='relu'))
    # Affine regressor + localisation net over the 1-channel stage-1 output.
    self.fc_loc = nn.Sequential(
        nn.Linear(10 * 11 * 16, 32),
        nn.ReLU(True),
        nn.Linear(32, 3 * 2))
    self.localization = nn.Sequential(
        nn.Conv2d(1, 8, kernel_size=7),
        nn.MaxPool2d(2, stride=2),
        nn.ReLU(True),
        nn.Conv2d(8, 10, kernel_size=5),
        nn.MaxPool2d(2, stride=2),
        nn.ReLU(True))
    # Second affine regressor + localisation net over 512-channel features.
    self.fc_loc_1 = nn.Sequential(
        nn.Linear(10 * 11 * 16, 32),
        nn.ReLU(True),
        nn.Linear(32, 3 * 2))
    self.localization_1 = nn.Sequential(
        nn.Conv2d(512, 8, kernel_size=7),
        nn.MaxPool2d(2, stride=2),
        nn.ReLU(True),
        nn.Conv2d(8, 10, kernel_size=5),
        nn.MaxPool2d(2, stride=2),
        nn.ReLU(True))
    self.reload_weight()
    # Stage 2 aliases the SAME module as stage 1 — deliberate weight sharing,
    # not a copy: updating one updates both.
    self.features5 = self.features4
    # Stage-2 ("part") head consumes concatenated 1024-channel features.
    self.de_pred_p = nn.Sequential(
        Conv2d(1024, 128, 1, same_padding=True, NL='relu'),
        Conv2d(128, 1, 1, same_padding=True, NL='relu'))
    self.freeze_f()
def __init__(self):
    """VGG16 front-end plus a dilated-conv / transposed-conv decoder that
    outputs a 2-channel localisation map at input resolution."""
    super(RAZ_loc, self).__init__()
    vgg = models.vgg16(pretrained=True)
    backbone = list(vgg.features.children())
    # Through conv4_3's ReLU (1/8 resolution).
    self.features4 = nn.Sequential(*backbone[0:23])
    # Dilated 3x3 convs interleaved with three stride-2 transposed convs
    # (8x total upsampling), finishing with a 1x1 conv to 2 channels.
    decoder_layers = [
        Conv2d(512, 512, 3, same_padding=True, NL='relu', dilation=2),
        Conv2d(512, 512, 3, same_padding=True, NL='relu', dilation=2),
        Conv2d(512, 512, 3, same_padding=True, NL='relu', dilation=2),
        nn.ConvTranspose2d(512, 256, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.ReLU(),
        Conv2d(256, 256, 3, same_padding=True, NL='relu', dilation=2),
        nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.ReLU(),
        Conv2d(128, 128, 3, same_padding=True, NL='relu', dilation=2),
        nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.ReLU(),
        Conv2d(64, 64, 3, same_padding=True, NL='relu', dilation=2),
        Conv2d(64, 2, 1, same_padding=True, NL='relu'),
    ]
    self.de_pred = nn.Sequential(*decoder_layers)
def __init__(self, ):
    """ResNet-50 backbone with an FPN-style pyramid (P2-P4) and a density
    head per pyramid level, plus one head on the raw input image.

    Loads backbone weights from the checkpoint at ``model_path``.
    """
    super(Res50_FPN, self).__init__()
    self.pyramid_feature_size = 256
    self.pred_feature_size = 256

    def make_head(in_channels, kernel_size=1):
        # Bottleneck conv followed by a 1x1 single-channel prediction conv.
        return nn.Sequential(
            Conv2d(in_channels, self.pred_feature_size, kernel_size,
                   same_padding=True, NL='relu'),
            Conv2d(self.pred_feature_size, 1, 1, same_padding=True, NL='relu'))

    # One density head per pyramid level.
    self.de_pred1 = make_head(self.pyramid_feature_size)
    self.de_pred2 = make_head(self.pyramid_feature_size)
    self.de_pred3 = make_head(self.pyramid_feature_size)
    self.de_pred4 = make_head(self.pyramid_feature_size)
    # Extra head applied directly to the 3-channel input image.
    self.de_pred = make_head(3, kernel_size=5)
    # Per-level lateral 1x1 ("in") and smoothing 3x3 ("out") convs.
    self.convP4in = nn.Sequential(
        Conv2d(2048, self.pyramid_feature_size, 1, same_padding=True, NL='relu'))
    self.convP4out = nn.Sequential(
        Conv2d(self.pyramid_feature_size, self.pyramid_feature_size, 3,
               same_padding=True, NL='relu'))
    self.convP3in = nn.Sequential(
        Conv2d(1024, self.pyramid_feature_size, 1, same_padding=True, NL='relu'))
    self.convP3out = nn.Sequential(
        Conv2d(self.pyramid_feature_size, self.pyramid_feature_size, 3,
               same_padding=True, NL='relu'))
    self.convP2in = nn.Sequential(
        Conv2d(512, self.pyramid_feature_size, 1, same_padding=True, NL='relu'))
    self.convP2out = nn.Sequential(
        Conv2d(self.pyramid_feature_size, self.pyramid_feature_size, 3,
               same_padding=True, NL='relu'))
    res = models.resnet50()
    res.load_state_dict(torch.load(model_path))
    # Stem only; the four res-layers are rebuilt below so their weights can
    # be copied explicitly.
    self.frontend = nn.Sequential(res.conv1, res.bn1, res.relu, res.maxpool)
    # NOTE(review): these calls pass four positional args to make_res_layer
    # while Res50/Res101 pass three — confirm the helper accepts both shapes.
    self.own_reslayer_1 = make_res_layer(Bottleneck, 64, 64, 3, stride=1)
    self.own_reslayer_1.load_state_dict(res.layer1.state_dict())
    self.own_reslayer_2 = make_res_layer(Bottleneck, 256, 128, 4, stride=2)
    self.own_reslayer_2.load_state_dict(res.layer2.state_dict())
    self.own_reslayer_3 = make_res_layer(Bottleneck, 512, 256, 6, stride=2)
    self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
    self.own_reslayer_4 = make_res_layer(Bottleneck, 1024, 512, 3, stride=2)
    self.own_reslayer_4.load_state_dict(res.layer4.state_dict())
def __init__(self, bn=False):
    """Multi-column CNN: three conv branches with different kernel sizes,
    fused by a 1x1 conv into a single-channel density map.

    Args:
        bn: insert batch-norm inside the custom Conv2d blocks.
    """
    super(MCNN, self).__init__()
    # Branch 1: largest kernels (9/7/7/7).
    self.branch1 = nn.Sequential(
        Conv2d(3, 16, 9, same_padding=True, bn=bn),
        nn.MaxPool2d(2),
        Conv2d(16, 32, 7, same_padding=True, bn=bn),
        nn.MaxPool2d(2),
        Conv2d(32, 16, 7, same_padding=True, bn=bn),
        Conv2d(16, 8, 7, same_padding=True, bn=bn))
    # Branch 2: medium kernels (7/5/5/5).
    self.branch2 = nn.Sequential(
        Conv2d(3, 20, 7, same_padding=True, bn=bn),
        nn.MaxPool2d(2),
        Conv2d(20, 40, 5, same_padding=True, bn=bn),
        nn.MaxPool2d(2),
        Conv2d(40, 20, 5, same_padding=True, bn=bn),
        Conv2d(20, 10, 5, same_padding=True, bn=bn))
    # Branch 3: smallest kernels (5/3/3/3).
    self.branch3 = nn.Sequential(
        Conv2d(3, 24, 5, same_padding=True, bn=bn),
        nn.MaxPool2d(2),
        Conv2d(24, 48, 3, same_padding=True, bn=bn),
        nn.MaxPool2d(2),
        Conv2d(48, 24, 3, same_padding=True, bn=bn),
        Conv2d(24, 12, 3, same_padding=True, bn=bn))
    # Fuse the concatenated branch outputs (8 + 10 + 12 = 30 channels).
    self.fuse = nn.Sequential(Conv2d(30, 1, 1, same_padding=True, bn=bn))
    initialize_weights(self.modules())
def __init__(self, bn=False, num_classes=10):
    """CMTL: cascaded multi-task network with a high-level count-class prior
    branch and a density-map estimation branch over a shared base.

    Args:
        bn: insert batch-norm inside the custom Conv2d blocks.
        num_classes: number of classes predicted by the prior branch's FC stack.
    """
    super(CMTL, self).__init__()
    self.num_classes = num_classes
    # Shared low-level feature extractor.
    self.base_layer = nn.Sequential(
        Conv2d(3, 16, 9, same_padding=True, NL='prelu', bn=bn),
        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))
    # High-level prior: conv stack (1/4 resolution), pooled to 32x32,
    # then three FC layers producing class logits.
    self.hl_prior_1 = nn.Sequential(
        Conv2d(32, 16, 9, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(32, 16, 7, same_padding=True, NL='prelu', bn=bn),
        Conv2d(16, 8, 7, same_padding=True, NL='prelu', bn=bn))
    self.hl_prior_2 = nn.Sequential(
        nn.AdaptiveMaxPool2d((32, 32)),
        Conv2d(8, 4, 1, same_padding=True, NL='prelu', bn=bn))
    self.hl_prior_fc1 = FC(4 * 1024, 512, NL='prelu')
    self.hl_prior_fc2 = FC(512, 256, NL='prelu')
    self.hl_prior_fc3 = FC(256, self.num_classes, NL='prelu')
    # Density branch, stage 1: downsampling conv stack (1/4 resolution).
    self.de_stage_1 = nn.Sequential(
        Conv2d(32, 20, 7, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(20, 40, 5, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(40, 20, 5, same_padding=True, NL='prelu', bn=bn),
        Conv2d(20, 10, 5, same_padding=True, NL='prelu', bn=bn))
    # Density branch, stage 2: 18-channel input (presumably stage-1's 10
    # channels concatenated with the prior branch's 8 — confirm in forward),
    # upsampled 4x back to input resolution.
    self.de_stage_2 = nn.Sequential(
        Conv2d(18, 24, 3, same_padding=True, NL='prelu', bn=bn),
        Conv2d(24, 32, 3, same_padding=True, NL='prelu', bn=bn),
        nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU(),
        nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU(),
        Conv2d(8, 1, 1, same_padding=True, NL='relu', bn=bn))
    initialize_weights(self.modules())
def __init__(self, ):
    """ResNet-101 backbone with an FPN-style pyramid (P2-P4) and a density
    head per pyramid level, plus one head on the raw input image.

    Loads backbone weights from the checkpoint at ``model_path``.
    """
    super(Res101_FPN, self).__init__()
    self.pyramid_feature_size = 256
    self.pred_feature_size = 128
    # Kept from the original; not used in this constructor.
    self.backend_feat = [64]

    def make_head(in_channels, kernel_size=1):
        # Bottleneck conv followed by a 1x1 single-channel prediction conv.
        return nn.Sequential(
            Conv2d(in_channels, self.pred_feature_size, kernel_size,
                   same_padding=True, NL='relu'),
            Conv2d(self.pred_feature_size, 1, 1, same_padding=True, NL='relu'))

    # One density head per pyramid level.
    self.de_pred1 = make_head(self.pyramid_feature_size)
    self.de_pred2 = make_head(self.pyramid_feature_size)
    self.de_pred3 = make_head(self.pyramid_feature_size)
    self.de_pred4 = make_head(self.pyramid_feature_size)
    # Extra head applied directly to the 3-channel input image.
    self.de_pred = make_head(3, kernel_size=5)
    # Per-level lateral 1x1 ("in") and smoothing 3x3 ("out") convs.
    self.convP4in = nn.Sequential(
        Conv2d(2048, self.pyramid_feature_size, 1, same_padding=True, NL='relu'))
    self.convP4out = nn.Sequential(
        Conv2d(self.pyramid_feature_size, self.pyramid_feature_size, 3,
               same_padding=True, NL='relu'))
    self.convP3in = nn.Sequential(
        Conv2d(1024, self.pyramid_feature_size, 1, same_padding=True, NL='relu'))
    self.convP3out = nn.Sequential(
        Conv2d(self.pyramid_feature_size, self.pyramid_feature_size, 3,
               same_padding=True, NL='relu'))
    self.convP2in = nn.Sequential(
        Conv2d(512, self.pyramid_feature_size, 1, same_padding=True, NL='relu'))
    self.convP2out = nn.Sequential(
        Conv2d(self.pyramid_feature_size, self.pyramid_feature_size, 3,
               same_padding=True, NL='relu'))
    res = models.resnet101()
    res.load_state_dict(torch.load(model_path))
    # Stem only; the four res-layers are rebuilt below so their weights can
    # be copied explicitly.
    self.frontend = nn.Sequential(res.conv1, res.bn1, res.relu, res.maxpool)
    # NOTE(review): these calls pass four positional args to make_res_layer
    # while Res50/Res101 pass three — confirm the helper accepts both shapes.
    self.own_reslayer_1 = make_res_layer(Bottleneck, 64, 64, 3, stride=1)
    self.own_reslayer_1.load_state_dict(res.layer1.state_dict())
    self.own_reslayer_2 = make_res_layer(Bottleneck, 256, 128, 4, stride=2)
    self.own_reslayer_2.load_state_dict(res.layer2.state_dict())
    self.own_reslayer_3 = make_res_layer(Bottleneck, 512, 256, 23, stride=2)
    self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
    self.own_reslayer_4 = make_res_layer(Bottleneck, 1024, 512, 3, stride=2)
    self.own_reslayer_4.load_state_dict(res.layer4.state_dict())
def __init__(self, bn=False, num_classes=10):
    """Multi-task network over a shared base: a bbox classification branch
    (via RoI pooling), a density-map branch with DULR message passing, and a
    segmentation branch.

    Args:
        bn: insert batch-norm inside the custom Conv2d blocks.
        num_classes: output size of the bbox prediction FC stack.
    """
    super(ori, self).__init__()
    self.num_classes = num_classes
    # Shared low-level feature extractor (single-channel input).
    self.base_layer = nn.Sequential(
        Conv2d(1, 16, 9, same_padding=True, NL='prelu', bn=bn),
        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))
    # High-level prior branch feeding the RoI-pooled bbox classifier.
    self.hl_prior = nn.Sequential(
        Conv2d(32, 16, 9, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn),
        Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn))
    # 16x16 RoI pooling at 1/4 of the input scale.
    self.roi_pool = RoIPool([16, 16], 1 / 4.0)
    self.hl_prior_conv2d = Conv2d(32, 16, 1, same_padding=True, NL='prelu', bn=bn)
    self.bbx_pred = nn.Sequential(
        FC(16 * 16 * 16, 512, NL='prelu'),
        FC(512, 256, NL='prelu'),
        FC(256, self.num_classes, NL='prelu'))
    # Density branch, stage 1: downsampling conv stack (1/4 resolution).
    self.den_stage_1 = nn.Sequential(
        Conv2d(32, 32, 7, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(32, 64, 5, same_padding=True, NL='prelu', bn=bn),
        nn.MaxPool2d(2),
        Conv2d(64, 32, 5, same_padding=True, NL='prelu', bn=bn),
        Conv2d(32, 32, 5, same_padding=True, NL='prelu', bn=bn))
    # Down-Up / Left-Right message-passing convs.
    self.den_stage_DULR = nn.Sequential(
        convDU(in_out_channels=32, kernel_size=(1, 9)),
        convLR(in_out_channels=32, kernel_size=(9, 1)))
    # Density branch, stage 2: 64-channel input, 4x upsampling.
    self.den_stage_2 = nn.Sequential(
        Conv2d(64, 64, 3, same_padding=True, NL='prelu', bn=bn),
        Conv2d(64, 32, 3, same_padding=True, NL='prelu', bn=bn),
        nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU(),
        nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU())
    # Segmentation branch: conv stack then 4x upsampling to 8 channels.
    self.seg_stage = nn.Sequential(
        Conv2d(32, 32, 1, same_padding=True, NL='prelu', bn=bn),
        Conv2d(32, 64, 3, same_padding=True, NL='prelu', bn=bn),
        Conv2d(64, 32, 3, same_padding=True, NL='prelu', bn=bn),
        nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU(),
        nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
        nn.PReLU())
    # Output convs: 2-channel seg map, an 8->8 transfer conv, and the final
    # 1-channel density prediction over 16 channels.
    self.seg_pred = Conv2d(8, 2, 1, same_padding=True, NL='relu', bn=bn)
    self.trans_den = Conv2d(8, 8, 1, same_padding=True, NL='relu', bn=bn)
    self.den_pred = Conv2d(16, 1, 1, same_padding=True, NL='relu', bn=bn)
    # Most branches get normal-init; the seg branch and its head use the
    # project's default initializer instead.
    weights_normal_init(self.base_layer, self.hl_prior, self.hl_prior_conv2d,
                        self.bbx_pred, self.den_stage_1, self.den_stage_DULR,
                        self.den_stage_2, self.trans_den, self.den_pred)
    initialize_weights(self.seg_stage, self.seg_pred)