def __init__(self, num_classes, pretrained=True, phase='train'):
    super(FCN8VGG, self).__init__()
    vgg = models.vgg19_bn()
    if pretrained:
        vgg.load_state_dict(torch.load(vgg19_bn_path))
    features = list(vgg.features.children())
    self.features3 = nn.Sequential(*features[0:27])
    self.features4_4 = nn.Sequential(*features[27:39])
    self.features4 = nn.Sequential(*features[39:40])
    self.features5 = nn.Sequential(*features[40:])
    self.features5_test = nn.Sequential(*features[40:52])
    self.fconv3 = nn.Conv2d(256, num_classes, kernel_size=1)
    self.fconv4 = nn.Conv2d(512, num_classes, kernel_size=1)
    self.fconv5 = nn.Sequential(
        nn.Conv2d(512, 2048, kernel_size=7),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(2048, 2048, kernel_size=1),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(2048, num_classes, kernel_size=1)
    )
    initialize_weights(self.fconv3, self.fconv4, self.fconv5)
    self.ssd_conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1)
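# A minimal check, not from the repository, of where the pooling layers fall in
# torchvision's vgg19_bn feature stack; it confirms the slice points used above
# (the pool layers sit at indices 6, 13, 26, 39 and 52, so features[0:27] ends
# with pool3 and 256 channels, features[27:39] is the conv4 block, features[39:40]
# is pool4, and features[40:52] is the conv5 block without pool5).
import torch.nn as nn
import torchvision.models as models

vgg = models.vgg19_bn()
for idx, layer in enumerate(vgg.features.children()):
    if isinstance(layer, nn.MaxPool2d):
        print(idx, layer)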
def __init__(self, num_classes, pretrained=True):
    super(FCN8ResNet, self).__init__()
    res = models.resnet152()
    if pretrained:
        res.load_state_dict(torch.load(res152_path))
    self.features3 = nn.Sequential(res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2)
    # pdb.set_trace()
    self.features4 = res.layer3
    self.features5 = res.layer4
    if cfg.TRAIN.LOC_P:
        # Fixed 2-channel coordinate prior in [-1, 1): a plane of normalized
        # x positions and its transpose for y, tiled over the batch.
        loc_tmp = np.arange(0, 2, 2.0 / 64) - 1.0
        loc_x = np.tile(loc_tmp, (64, 1))
        loc_y = loc_x.transpose()
        loc = np.concatenate((loc_x[None, :, :], loc_y[None, :, :]), axis=0)[None, :, :, :]
        loc_input = np.repeat(loc, cfg.TRAIN.IMG_BATCH_SIZE, axis=0)
        loc_input = torch.from_numpy(loc_input.astype(np.float32))
        self.loc_input = Variable(loc_input.cuda())
        self.fconv3 = nn.Conv2d(514, num_classes, kernel_size=1)  # 512 + 2 location channels
    else:
        self.fconv3 = nn.Conv2d(512, num_classes, kernel_size=1)
    self.fconv4 = nn.Conv2d(2048, num_classes, kernel_size=1)
    self.fconv5 = nn.Conv2d(2560, num_classes, kernel_size=7)
    initialize_weights(self.fconv3, self.fconv4, self.fconv5)
    self.ssd_conv5 = nn.Conv2d(1024, 512, kernel_size=3, padding=1, dilation=1)
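# A standalone sketch, under the assumption that cfg.TRAIN.LOC_P switches on a
# CoordConv-style location prior: two 64x64 planes of normalized x/y positions
# in [-1, 1) are tiled over the batch and concatenated to the stage-3 features,
# which is why fconv3 takes 512 + 2 = 514 input channels above (and 256 + 2 = 258
# in the VGG variant further down). `make_loc_input` and `batch_size` are
# illustrative names, not part of the repository.
import numpy as np
import torch


def make_loc_input(batch_size, size=64):
    loc_tmp = np.arange(0, 2, 2.0 / size) - 1.0      # `size` values in [-1, 1)
    loc_x = np.tile(loc_tmp, (size, 1))               # x-coordinate plane
    loc_y = loc_x.transpose()                         # y-coordinate plane
    loc = np.stack((loc_x, loc_y))[None]              # (1, 2, size, size)
    loc = np.repeat(loc, batch_size, axis=0)          # (batch, 2, size, size)
    return torch.from_numpy(loc.astype(np.float32))


feat3 = torch.randn(4, 512, 64, 64)                   # stage-3 features
fused = torch.cat((feat3, make_loc_input(4)), dim=1)  # (4, 514, 64, 64)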
def __init__(self, num_classes, pretrained=True):
    super(FCN8ResNet, self).__init__()
    res = models.resnet152()
    if pretrained:
        res.load_state_dict(torch.load(res152_path))
    self.features3 = nn.Sequential(
        res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2
    )
    # pdb.set_trace()
    self.features4 = res.layer3
    self.features5 = res.layer4
    self.fconv3 = nn.Conv2d(512, num_classes, kernel_size=1)
    self.fconv4 = nn.Conv2d(2048, num_classes, kernel_size=1)
    self.fconv5 = nn.Conv2d(2560, num_classes, kernel_size=7)
    initialize_weights(self.fconv3, self.fconv4, self.fconv5)
    self.ssd_conv5 = nn.Conv2d(1024, 512, kernel_size=3, padding=1, dilation=1)
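# A quick shape check, not from the repository, of the ResNet-152 stages used
# above: the block ending with layer2 gives 512 channels at 1/8 resolution,
# layer3 gives 1024 at 1/16, and layer4 gives 2048 at 1/32. How these maps are
# combined before fconv4 (2048 in) and fconv5 (2560 in) is decided in the
# forward pass, which is not shown in this snippet.
import torch
import torch.nn as nn
import torchvision.models as models

res = models.resnet152()
stage3 = nn.Sequential(res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2)
with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)
    f3 = stage3(x)        # (1, 512, 64, 64)
    f4 = res.layer3(f3)   # (1, 1024, 32, 32)
    f5 = res.layer4(f4)   # (1, 2048, 16, 16)
print(f3.shape, f4.shape, f5.shape)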
def __init__(self, num_classes, pretrained=True, phase='train'):
    super(FCN8VGG, self).__init__()
    vgg = models.vgg19_bn()
    if pretrained:
        vgg.load_state_dict(torch.load(vgg19_bn_path))
    features = list(vgg.features.children())
    self.features3 = nn.Sequential(*features[0:27])
    self.features4_4 = nn.Sequential(*features[27:39])
    self.features4 = nn.Sequential(*features[39:40])
    self.features5 = nn.Sequential(*features[40:])
    self.features5_test = nn.Sequential(*features[40:52])
    if cfg.TRAIN.LOC_P:
        # Fixed 2-channel coordinate prior in [-1, 1): a plane of normalized
        # x positions and its transpose for y, tiled over the batch.
        loc_tmp = np.arange(0, 2, 2.0 / 64) - 1.0
        loc_x = np.tile(loc_tmp, (64, 1))
        loc_y = loc_x.transpose()
        loc = np.concatenate((loc_x[None, :, :], loc_y[None, :, :]), axis=0)[None, :, :, :]
        loc_input = np.repeat(loc, cfg.TRAIN.IMG_BATCH_SIZE, axis=0)
        loc_input = torch.from_numpy(loc_input.astype(np.float32))
        self.loc_input = Variable(loc_input.cuda())
        self.fconv3 = nn.Conv2d(258, num_classes, kernel_size=1)  # 256 + 2 location channels
    else:
        self.fconv3 = nn.Conv2d(256, num_classes, kernel_size=1)
    self.fconv4 = nn.Conv2d(1536, num_classes, kernel_size=1)
    self.fconv5 = nn.Sequential(
        nn.Conv2d(1024, 2048, kernel_size=7),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(2048, 2048, kernel_size=1),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(2048, num_classes, kernel_size=1))
    initialize_weights(self.fconv3, self.fconv4, self.fconv5)
    self.ssd_conv5 = nn.Conv2d(512, 512, kernel_size=3, padding=1, dilation=1)
def __init__(self, num_classes, pretrained=True, phase='train'):
    super(FCN8VGG, self).__init__()
    vgg = models.vgg19_bn()
    if pretrained:
        vgg.load_state_dict(torch.load(vgg19_bn_path))
    features = list(vgg.features.children())
    self.features3_4 = nn.Sequential(*features[0:26])    # for FCN 256,128,128
    # self.features3 = nn.Sequential(*features[26:27])   # for FCN 256,64,64
    self.features4_4 = nn.Sequential(*features[26:39])   # for FCN 256,64,64
    # self.features4 = nn.Sequential(*features[39:40])   # for FCN 512, 32, 32
    self.features5_4 = nn.Sequential(*features[39:52])   # for FCN 512, 32, 32
    # self.features5 = nn.Sequential(*features[52:])     # for FCN 512, 16, 16
    self.fconv3 = nn.Conv2d(256, num_classes, kernel_size=1)
    self.fconv4 = nn.Conv2d(512, num_classes, kernel_size=1)
    self.fconv5 = nn.Sequential(
        nn.Conv2d(1536, 2048, kernel_size=7),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(2048, 2048, kernel_size=1),
        nn.ReLU(inplace=True),
        nn.Dropout(),
        nn.Conv2d(2048, num_classes, kernel_size=1))
    initialize_weights(self.fconv3, self.fconv4, self.fconv5)
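# The forward pass is not part of these snippets; the sketch below only
# illustrates the FCN-8s style fusion the score layers are built for: 1x1
# score maps from three stages are upsampled and summed coarse-to-fine, then
# upsampled to the input resolution. `TinyFCN8` and its channel sizes are
# illustrative assumptions; the models above additionally concatenate feature
# maps (hence the 1536- and 2560-channel inputs), which is omitted here.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyFCN8(nn.Module):
    def __init__(self, num_classes):
        super(TinyFCN8, self).__init__()
        self.score3 = nn.Conv2d(256, num_classes, kernel_size=1)   # 1/8-resolution stage
        self.score4 = nn.Conv2d(512, num_classes, kernel_size=1)   # 1/16-resolution stage
        self.score5 = nn.Conv2d(512, num_classes, kernel_size=1)   # 1/32-resolution stage

    def forward(self, f3, f4, f5, out_size):
        s5 = self.score5(f5)
        s4 = self.score4(f4) + F.interpolate(s5, size=f4.shape[2:], mode='bilinear', align_corners=False)
        s3 = self.score3(f3) + F.interpolate(s4, size=f3.shape[2:], mode='bilinear', align_corners=False)
        return F.interpolate(s3, size=out_size, mode='bilinear', align_corners=False)


net = TinyFCN8(num_classes=21)
f3, f4, f5 = torch.randn(1, 256, 64, 64), torch.randn(1, 512, 32, 32), torch.randn(1, 512, 16, 16)
print(net(f3, f4, f5, out_size=(512, 512)).shape)  # (1, 21, 512, 512)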