# Example 1
 def __init__(self):
     """Assemble the VGG16-BN backbone and the pairwise order-prediction head."""
     super().__init__()
     # Feature backbone (project-local ``vgg`` module).
     backbone = vgg.vgg16_bn()
     self.vgg = backbone
     # Pairwise head: 3x3 conv collapsing 1024 fused channels down to 3.
     pair_channels = 3
     self.pairwise = nn.Conv2d(1024, pair_channels, kernel_size=3)
     # Six 5x5x3 pairwise maps are flattened into a single 12-way prediction.
     per_pair = 5 * 5 * pair_channels
     self.order_predict = nn.Linear(per_pair * 6, 12)
     self.relu = nn.ReLU()
     self.drop = nn.Dropout(p=0.4)
# Example 2
def get_network(args, use_gpu=True):
    """Instantiate the network named by ``args.net``.

    Prints an error and exits the process when the name is not supported;
    moves the model to GPU when ``use_gpu`` is True.
    """
    # Guard clause: only 'vgg16' is supported at the moment.
    if args.net != 'vgg16':
        print('the network name you have entered is not supported yet')
        sys.exit()

    from model.vgg import vgg16_bn
    net = vgg16_bn()

    if use_gpu:
        net = net.cuda()
    return net
# Example 3
def main():
    """Evaluate a saved classifier: clean accuracy, then FGSM and PGD attacks.

    Relies on module-level names defined elsewhere in this file: ``args``
    (CLI options), ``test`` (clean evaluation) and ``attack`` (adversarial
    evaluation).
    """

    # ======== data set preprocess =============
    # ======== mean-variance normalization is removed
    # Only ToTensor(): images stay in [0, 1], which the k/255 epsilon grid
    # below assumes.  (Hoisted out of the branches — it was identical in all.)
    transform = transforms.Compose([transforms.ToTensor()])
    if args.dataset == 'CIFAR10':
        trainset = datasets.CIFAR10(root=args.data_dir,
                                    train=True,
                                    download=True,
                                    transform=transform)
        testset = datasets.CIFAR10(root=args.data_dir,
                                   train=False,
                                   download=True,
                                   transform=transform)
    elif args.dataset == 'CIFAR100':
        args.num_classes = 100
        trainset = datasets.CIFAR100(root=args.data_dir,
                                     train=True,
                                     download=True,
                                     transform=transform)
        testset = datasets.CIFAR100(root=args.data_dir,
                                    train=False,
                                    download=True,
                                    transform=transform)
    elif args.dataset == 'svhn':
        trainset = datasets.SVHN(root=args.data_dir,
                                 split='train',
                                 download=True,
                                 transform=transform)
        testset = datasets.SVHN(root=args.data_dir,
                                split='test',
                                download=True,
                                transform=transform)
    else:
        # Raise instead of `assert False`: asserts are stripped under -O.
        raise ValueError('Unknown dataset : {}'.format(args.dataset))

    # Evaluation only, so neither loader shuffles.
    trainloader = data.DataLoader(trainset,
                                  batch_size=args.batch_size,
                                  shuffle=False)
    testloader = data.DataLoader(testset,
                                 batch_size=args.batch_size,
                                 shuffle=False)
    train_num, test_num = len(trainset), len(testset)
    print('-------- DATA INFOMATION --------')
    print('---- dataset: ' + args.dataset)
    print('---- #train : %d' % train_num)
    print('---- #test  : %d' % test_num)

    # ======== load network ========
    # Checkpoint is mapped to CPU first; the model itself is built on GPU.
    checkpoint = torch.load(args.model_path, map_location=torch.device("cpu"))
    if args.model == 'vgg11':
        from model.vgg import vgg11_bn
        net = vgg11_bn(num_classes=args.num_classes).cuda()
    elif args.model == 'vgg13':
        from model.vgg import vgg13_bn
        net = vgg13_bn(num_classes=args.num_classes).cuda()
    elif args.model == 'vgg16':
        from model.vgg import vgg16_bn
        net = vgg16_bn(num_classes=args.num_classes).cuda()
    elif args.model == 'vgg19':
        from model.vgg import vgg19_bn
        net = vgg19_bn(num_classes=args.num_classes).cuda()
    elif args.model == 'resnet20':
        from model.resnet_v1 import resnet20
        net = resnet20(num_classes=args.num_classes).cuda()
    elif args.model == 'resnet32':
        from model.resnet_v1 import resnet32
        net = resnet32(num_classes=args.num_classes).cuda()
    elif args.model == 'wrn28x5':
        from model.wideresnet import wrn28
        net = wrn28(widen_factor=5, num_classes=args.num_classes).cuda()
    elif args.model == 'wrn28x10':
        from model.wideresnet import wrn28
        net = wrn28(widen_factor=10, num_classes=args.num_classes).cuda()
    else:
        # Raise instead of `assert False`: asserts are stripped under -O.
        raise ValueError('Unknown model : {}'.format(args.model))
    # Wrap before loading: checkpoints were presumably saved from a
    # DataParallel model ('module.' key prefix) — NOTE(review): confirm.
    net = nn.DataParallel(net)
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    print('-------- MODEL INFORMATION --------')
    print('---- model:      ' + args.model)
    print('---- saved path: ' + args.model_path)

    print('-------- START TESTING --------')
    corr_tr, corr_te = test(net, trainloader, testloader)
    acc_tr, acc_te = corr_tr / train_num, corr_te / test_num
    print('Train acc. = %f; Test acc. = %f.' % (acc_tr, acc_te))

    # Shared epsilon grid 1/255 .. 12/255 (the two attacks used identical
    # hand-written lists; build it once instead).
    attack_epsilons = [i / 255 for i in range(1, 13)]

    print('-------- START FGSM ATTACK --------')
    print('---- EPSILONS: ', attack_epsilons)
    for eps in attack_epsilons:
        print('---- current eps = %.3f...' % eps)
        correct_te_fgsm = attack(net, testloader, eps, "FGSM")
        acc_te_fgsm = correct_te_fgsm / float(test_num)
        print('Attacked test acc. = %f.' % acc_te_fgsm)

    print('-------- START PGD ATTACK -------')
    print('---- EPSILON: ', attack_epsilons)
    for eps in attack_epsilons:
        print('---- current eps = %.3f...' % eps)
        corr_te_pgd = attack(net, testloader, eps, "PGD")
        acc_te_pgd = corr_te_pgd / float(test_num)
        print('Attacked test acc. = %f.' % acc_te_pgd)
    # NOTE(review): ``ppm_scales`` is a mutable default argument and is only
    # printed here; ``criterion``'s default is evaluated once at import time.
    # Confirm both are intended.
    def __init__(self, layers=50, classes=2, criterion=nn.CrossEntropyLoss(ignore_index=255),
                 pretrained=True, shot=1, ppm_scales=[60, 30, 15, 8], vgg=False, FPN=True):
        """Build the few-shot segmentation model: backbone + fusion heads.

        Args:
            layers: ResNet depth, one of 50/101/152 (ignored when ``vgg``).
            classes: number of output classes (must be > 1).
            criterion: segmentation loss stored on the module.
            pretrained: load pretrained weights for the backbone.
            shot: number of support shots per episode.
            ppm_scales: pyramid scales (only printed in this constructor).
            vgg: use VGG16-BN instead of ResNet as the backbone.
            FPN: forwarded to the triplet encoder.
        """
        super(FSSNet, self).__init__()
        assert layers in [50, 101, 152]
        print(ppm_scales)
        assert classes > 1
        from torch.nn import BatchNorm2d as BatchNorm
        # Episode / loss configuration kept on the module.
        self.criterion = criterion
        self.shot = shot
        self.vgg = vgg
        # Learnable scalar — presumably weights the triplet branch;
        # NOTE(review): confirm its use in forward().
        self.trip = nn.Parameter(torch.zeros(1))

        models.BatchNorm = BatchNorm

        # Backbone Related
        if self.vgg:
            print('INFO: Using VGG_16 bn')
            vgg_models.BatchNorm = BatchNorm
            vgg16 = vgg_models.vgg16_bn(pretrained=pretrained)
            print(vgg16)
            self.layer0, self.layer1, self.layer2, \
                self.layer3, self.layer4 = get_vgg16_layer(vgg16)

        else:
            print('INFO: Using ResNet {}'.format(layers))
            if layers == 50:
                resnet = models.resnet50(pretrained=pretrained)
            elif layers == 101:
                resnet = models.resnet101(pretrained=pretrained)
            else:
                resnet = models.resnet152(pretrained=pretrained)
            # NOTE(review): conv2/conv3/relu1 on the stem imply a
            # project-local deep-stem ResNet, not torchvision's — confirm.
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu1, resnet.conv2, resnet.bn2, resnet.relu2,
                                        resnet.conv3, resnet.bn3, resnet.relu3, resnet.maxpool)
            self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4


        # feature dimension
        reduce_dim = 256
        # NOTE(review): ``class_num`` is assigned but never used below.
        class_num =2
        # if self.vgg:
        #     fea_dim = 512 + 256
        # else:
        #     fea_dim = 1024 + 512

        # Query-branch channel reduction (downsampling), 512 -> 256.
        self.down = nn.Sequential(
            nn.Conv2d(512, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5)
        )
        # Second reduction head, 1024 -> 256.
        self.down_s = nn.Sequential(
            nn.Conv2d(1024, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.5)
        )
        # Support-branch triplet encoder.  NOTE(review): calling .cuda() in
        # __init__ hard-codes GPU placement — confirm CPU use is unsupported.
        self.Tripletencoder = Tripletencoder101(FPN).cuda()
        # 256 -> 256
        self.res1 = nn.Sequential(
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True),
        )
        # 256 -> 256
        self.res2 = nn.Sequential(
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
        )
        # 256 -> 256
        self.res3 = nn.Sequential(
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
        )
        # ASPP context module and graph-convolution block (also GPU-pinned).
        self.ASPP = ASPP().cuda()
        self.GCN = g_GCN(reduce_dim, int(reduce_dim / 2)).cuda()


        # 512 -> 256


        # Final classifier head: 3x3 conv + dropout + 1x1 conv to ``classes``.
        self.cls = nn.Sequential(
            nn.Conv2d(reduce_dim, reduce_dim, kernel_size=3, padding=1, bias=False),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=0.1),
            nn.Conv2d(reduce_dim, classes, kernel_size=1)
        )

        # Three merge heads that fold one extra (mask) channel back to 256.
        self.int1 = nn.Sequential(
            nn.Conv2d(reduce_dim+1, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True))

        self.int2 = nn.Sequential(
            nn.Conv2d(reduce_dim+1, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True))
        self.int3 = nn.Sequential(
            nn.Conv2d(reduce_dim+1, reduce_dim, kernel_size=1, padding=0, bias=False),
            nn.ReLU(inplace=True))
# Example 5
    # NOTE(review): ``ppm_scales`` is a mutable default argument; ``sync_bn``
    # is never used; the ``BatchNorm`` parameter is shadowed by the local
    # import below.  Confirm all three are intended.
    def __init__(self, layers=50, classes=2, zoom_factor=8, \
        criterion=nn.CrossEntropyLoss(ignore_index=255), BatchNorm=nn.BatchNorm2d, \
        pretrained=True, sync_bn=True, shot=1, ppm_scales=[60, 30, 15, 8], vgg=False):
        """Build the PFENet-style few-shot segmentation model.

        Args:
            layers: ResNet depth, one of 50/101/152 (ignored when ``vgg``).
            classes: number of output classes (must be > 1).
            zoom_factor: output zoom factor stored on the module.
            criterion: segmentation loss stored on the module.
            BatchNorm: overridden by the local import below (see NOTE above).
            pretrained: load pretrained weights for the backbone.
            sync_bn: unused in this constructor.
            shot: number of support shots per episode.
            ppm_scales: pyramid pooling bin sizes.
            vgg: use VGG16-BN instead of ResNet as the backbone.
        """
        super(PFENet, self).__init__()
        assert layers in [50, 101, 152]
        print(ppm_scales)
        assert classes > 1
        # Rebinds ``BatchNorm`` locally, overriding the parameter of the
        # same name.
        from torch.nn import BatchNorm2d as BatchNorm
        self.zoom_factor = zoom_factor
        self.criterion = criterion
        self.shot = shot
        self.ppm_scales = ppm_scales
        self.vgg = vgg

        models.BatchNorm = BatchNorm

        # ---- backbone ----
        if self.vgg:
            print('INFO: Using VGG_16 bn')
            vgg_models.BatchNorm = BatchNorm
            vgg16 = vgg_models.vgg16_bn(pretrained=pretrained)
            print(vgg16)
            self.layer0, self.layer1, self.layer2, \
                self.layer3, self.layer4 = get_vgg16_layer(vgg16)

        else:
            print('INFO: Using ResNet {}'.format(layers))
            if layers == 50:
                resnet = models.resnet50(pretrained=pretrained)
            elif layers == 101:
                resnet = models.resnet101(pretrained=pretrained)
            else:
                resnet = models.resnet152(pretrained=pretrained)
            # NOTE(review): conv2/conv3/relu1 on the stem imply a
            # project-local deep-stem ResNet, not torchvision's — confirm.
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu1,
                                        resnet.conv2, resnet.bn2, resnet.relu2,
                                        resnet.conv3, resnet.bn3, resnet.relu3,
                                        resnet.maxpool)
            self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

            # Convert layer3/layer4 to dilated convolutions (stride 1,
            # dilation 2/4) so the backbone keeps spatial resolution.
            for n, m in self.layer3.named_modules():
                if 'conv2' in n:
                    m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
                elif 'downsample.0' in n:
                    m.stride = (1, 1)
            for n, m in self.layer4.named_modules():
                if 'conv2' in n:
                    m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
                elif 'downsample.0' in n:
                    m.stride = (1, 1)

        # Concatenated feature width depends on the chosen backbone.
        reduce_dim = 256
        if self.vgg:
            fea_dim = 512 + 256
        else:
            fea_dim = 1024 + 512

        # Final classifier head: 3x3 conv + dropout + 1x1 conv to ``classes``.
        self.cls = nn.Sequential(
            nn.Conv2d(reduce_dim,
                      reduce_dim,
                      kernel_size=3,
                      padding=1,
                      bias=False), nn.ReLU(inplace=True), nn.Dropout2d(p=0.1),
            nn.Conv2d(reduce_dim, classes, kernel_size=1))

        # Channel reduction for query and support features (fea_dim -> 256).
        self.down_query = nn.Sequential(
            nn.Conv2d(fea_dim,
                      reduce_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False), nn.ReLU(inplace=True), nn.Dropout2d(p=0.5))
        self.down_supp = nn.Sequential(
            nn.Conv2d(fea_dim,
                      reduce_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False), nn.ReLU(inplace=True), nn.Dropout2d(p=0.5))

        # One adaptive pool per pyramid bin > 1.  NOTE(review): plain Python
        # list, not nn.ModuleList — harmless here since pooling layers have
        # no parameters, but they are not registered as submodules.
        self.pyramid_bins = ppm_scales
        self.avgpool_list = []
        for bin in self.pyramid_bins:
            if bin > 1:
                self.avgpool_list.append(nn.AdaptiveAvgPool2d(bin))

        # NOTE(review): ``factor`` is assigned but never used below.
        factor = 1
        mask_add_num = 1
        # Per-scale heads: merge (features + prior mask), refine, classify.
        self.init_merge = []
        self.beta_conv = []
        self.inner_cls = []
        for bin in self.pyramid_bins:
            self.init_merge.append(
                nn.Sequential(
                    nn.Conv2d(reduce_dim * 2 + mask_add_num,
                              reduce_dim,
                              kernel_size=1,
                              padding=0,
                              bias=False),
                    nn.ReLU(inplace=True),
                ))
            self.beta_conv.append(
                nn.Sequential(
                    nn.Conv2d(reduce_dim,
                              reduce_dim,
                              kernel_size=3,
                              padding=1,
                              bias=False), nn.ReLU(inplace=True),
                    nn.Conv2d(reduce_dim,
                              reduce_dim,
                              kernel_size=3,
                              padding=1,
                              bias=False), nn.ReLU(inplace=True)))
            self.inner_cls.append(
                nn.Sequential(
                    nn.Conv2d(reduce_dim,
                              reduce_dim,
                              kernel_size=3,
                              padding=1,
                              bias=False), nn.ReLU(inplace=True),
                    nn.Dropout2d(p=0.1),
                    nn.Conv2d(reduce_dim, classes, kernel_size=1)))
        # Register the per-scale heads so their parameters are tracked.
        self.init_merge = nn.ModuleList(self.init_merge)
        self.beta_conv = nn.ModuleList(self.beta_conv)
        self.inner_cls = nn.ModuleList(self.inner_cls)

        # Fuse the concatenated multi-scale outputs back to 256 channels.
        self.res1 = nn.Sequential(
            nn.Conv2d(reduce_dim * len(self.pyramid_bins),
                      reduce_dim,
                      kernel_size=1,
                      padding=0,
                      bias=False),
            nn.ReLU(inplace=True),
        )
        self.res2 = nn.Sequential(
            nn.Conv2d(reduce_dim,
                      reduce_dim,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(reduce_dim,
                      reduce_dim,
                      kernel_size=3,
                      padding=1,
                      bias=False),
            nn.ReLU(inplace=True),
        )

        # Global average pooling to a 1x1 descriptor.
        self.GAP = nn.AdaptiveAvgPool2d(1)

        # Inter-scale blending convs (one per adjacent pyramid pair).
        self.alpha_conv = []
        for idx in range(len(self.pyramid_bins) - 1):
            self.alpha_conv.append(
                nn.Sequential(
                    nn.Conv2d(512,
                              256,
                              kernel_size=1,
                              stride=1,
                              padding=0,
                              bias=False), nn.ReLU()))
        self.alpha_conv = nn.ModuleList(self.alpha_conv)
# Example 6
# Evaluation loader: deterministic order, pinned memory for fast H2D copies.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# ---- model selection ----
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10,
                     dropRate=0.3)
elif args.model == 'resnet20':
    cnn = resnet20(num_classes=num_classes)
elif args.model == 'vgg16':
    cnn = vgg16_bn(num_classes=num_classes)
else:
    # Fail fast with a clear message; previously an unknown model name left
    # `cnn` undefined and crashed later with a NameError.
    raise ValueError('Unknown model: {}'.format(args.model))


# Wrap model if using input dropout.
if args.input_dropout:
    print('Wrapping model with input dropout.')
    cnn = augmentations.ModelWithInputDropout(
        cnn,
        args.keep_prob,
        num_samples=args.num_samples,
    )


criterion = torch.nn.CrossEntropyLoss()
if args.cuda:
    cnn = cnn.cuda()