Example #1
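The `__init__` of a CNN colorization model: a grayscale stem convolution feeds a ResNet50 backbone whose classifier is replaced by a transposed-conv upsampling head that predicts a distribution over 313 quantized ab color bins, with helper layers for soft color encoding and class rebalancing.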
    def __init__(self, batchNorm=True, pretrained=True):
        super().__init__()

        # Colorization helper layers: soft ab-bin encoding, class-rebalancing
        # prior weights, and a mask that zeroes out near-gray images.
        self.nnecnclayer = NNEncLayer()
        self.priorboostlayer = PriorBoostLayer()
        self.nongraymasklayer = NonGrayMaskLayer()
        self.rebalancelayer = Rebalance_Op.apply
        self.pool = nn.AvgPool2d(4, 4)
        self.upsample = nn.Upsample(scale_factor=4)

        self.bw_conv = nn.Conv2d(1, 64, 3, padding=1)
        self.main = ResNet50()

        # NOTE: the `pretrained` flag is currently unused; loading ImageNet
        # weights was disabled here:
        # if pretrained:
        #     print('loading pretrained model....')
        #     self.main = resnet50(pretrained=True)
        # else:
        #     self.main = resnet50()

        # Swap the stem so the backbone accepts 1-channel (grayscale) input.
        self.main.conv1 = nn.Conv2d(1, 64, 3, padding=1)

        # Replace the classifier with a 4x transposed-convolution upsampling head.
        self.main.linear = nn.ConvTranspose2d(2048, 256, 4, 4)
        self.relu = nn.ReLU()

        self.conv_8 = conv(256, 256, 2, [1, 1], batchNorm=False)
        # 313 output channels: one per quantized ab color bin.
        self.conv313 = nn.Conv2d(256, 313, 1, 1)
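Example #1 relies on a `conv` helper that is not shown in the excerpt. Below is a minimal sketch of what such a helper usually looks like (convolution, optional batch norm, ReLU); the padding and activation choices are assumptions, not confirmed by the snippet:

import torch.nn as nn

def conv(in_planes, out_planes, kernel_size=3, stride=1, batchNorm=True):
    # Assumed structure: Conv2d -> optional BatchNorm2d -> ReLU.
    modules = [nn.Conv2d(in_planes, out_planes, kernel_size,
                         stride=stride, padding=kernel_size // 2,
                         bias=not batchNorm)]
    if batchNorm:
        modules.append(nn.BatchNorm2d(out_planes))
    modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)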
Example #2
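An adversarial training loop for ResNet50 on CIFAR-10: it carves a 10% validation split off the training set, tracks standard accuracy (TA) and adversarial accuracy (ATA) each epoch, and checkpoints the best model under each metric before testing both.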
def main():
    global args, best_prec1, best_ata
    args = parser.parse_args()
    print(args)

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    torch.cuda.set_device(int(args.gpu))
    setup_seed(args.seed)

    model = ResNet50()
    normalize = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
    model = nn.Sequential(normalize, model)
    model.cuda()
    cudnn.benchmark = True

    if args.pretrained_model:
        model_dict_pretrain = torch.load(
            args.pretrained_model,
            map_location=torch.device('cuda:' + str(args.gpu)))
        model.load_state_dict(model_dict_pretrain, strict=False)
        print('model loaded:', args.pretrained_model)

    # data augmentation for training
    train_trans = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor()
    ])

    val_trans = transforms.Compose([transforms.ToTensor()])

    # dataset setup
    train_dataset = datasets.CIFAR10(args.data,
                                     train=True,
                                     transform=train_trans,
                                     download=True)
    test_dataset = datasets.CIFAR10(args.data,
                                    train=False,
                                    transform=val_trans,
                                    download=True)

    valid_size = 0.1
    indices = list(range(len(train_dataset)))
    split = int(np.floor(valid_size * len(train_dataset)))
    np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    # These are Subset datasets, not samplers; note that the validation split
    # inherits the training augmentations here.
    train_subset = torch.utils.data.Subset(train_dataset, train_idx)
    valid_subset = torch.utils.data.Subset(train_dataset, valid_idx)

    train_loader = torch.utils.data.DataLoader(train_subset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(valid_subset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=2,
                                             pin_memory=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=2,
                                              pin_memory=True)

    decreasing_lr = list(map(int, args.decreasing_lr.split(',')))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=decreasing_lr,
                                                     gamma=0.1)

    print('adv training')

    train_acc = []
    ta = []
    ata = []

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    for epoch in range(args.epochs):

        print(optimizer.state_dict()['param_groups'][0]['lr'])
        acc, loss = train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        tacc, tloss = validate(val_loader, model, criterion)

        atacc, atloss = validate_adv(val_loader, model, criterion)

        scheduler.step()

        train_acc.append(acc)
        ta.append(tacc)
        ata.append(atacc)

        # remember best prec@1 and save checkpoint
        is_best = tacc > best_prec1
        best_prec1 = max(tacc, best_prec1)

        ata_is_best = atacc > best_ata
        best_ata = max(atacc, best_ata)

        if is_best:

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                },
                is_best,
                filename=os.path.join(args.save_dir, 'best_model.pt'))

        if ata_is_best:

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                },
                ata_is_best,  # was mistakenly passing is_best here
                filename=os.path.join(args.save_dir, 'ata_best_model.pt'))

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            },
            is_best,
            filename=os.path.join(args.save_dir, 'model.pt'))

        plt.plot(train_acc, label='train_acc')
        plt.plot(ta, label='TA')
        plt.plot(ata, label='ATA')
        plt.legend()
        plt.savefig(os.path.join(args.save_dir, 'net_train.png'))
        plt.close()

    best_model_path = os.path.join(args.save_dir, 'ata_best_model.pt')
    print('start testing ATA best model')
    model.load_state_dict(torch.load(best_model_path)['state_dict'])
    tacc, tloss = validate(test_loader, model, criterion)
    atacc, atloss = validate_adv(test_loader, model, criterion)

    best_model_path = os.path.join(args.save_dir, 'best_model.pt')
    print('start testing TA best model')
    model.load_state_dict(torch.load(best_model_path)['state_dict'])
    tacc, tloss = validate(test_loader, model, criterion)
    atacc, atloss = validate_adv(test_loader, model, criterion)
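Both training examples call project helpers (`setup_seed`, `save_checkpoint`, `NormalizeByChannelMeanStd`) that are not defined in the excerpts. Minimal sketches of the first two under common conventions follow; the source project's versions may differ:

import random

import numpy as np
import torch

def setup_seed(seed):
    # Seed every RNG in play so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

def save_checkpoint(state, is_best, filename='checkpoint.pt'):
    # Assumed minimal behavior: persist the checkpoint dict; the real helper
    # may additionally copy the file when is_best is True.
    torch.save(state, filename)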
Example #3
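Standard (non-adversarial) training of a 4-way rotation-prediction model on CIFAR-10, using a per-step cosine-annealed learning rate and checkpointing on validation accuracy.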
def main():
    global args, best_prec1
    args = parser.parse_args()
    print(args)

    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    torch.cuda.set_device(int(args.gpu))

    setup_seed(args.seed)

    model = Rotation_model(4)
    normalize = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
    model = nn.Sequential(normalize, model)
    model.cuda()

    cudnn.benchmark = True

    train_trans = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor()
    ])

    val_trans = transforms.Compose([transforms.ToTensor()])

    # dataset setup
    train_dataset = datasets.CIFAR10(args.data,
                                     train=True,
                                     transform=train_trans,
                                     download=True)
    test_dataset = datasets.CIFAR10(args.data,
                                    train=False,
                                    transform=val_trans,
                                    download=True)

    valid_size = 0.1
    indices = list(range(len(train_dataset)))
    split = int(np.floor(valid_size * len(train_dataset)))
    np.random.shuffle(indices)

    train_idx, valid_idx = indices[split:], indices[:split]
    # These are Subset datasets, not samplers; note that the validation split
    # inherits the training augmentations here.
    train_subset = torch.utils.data.Subset(train_dataset, train_idx)
    valid_subset = torch.utils.data.Subset(train_dataset, valid_idx)

    train_loader = torch.utils.data.DataLoader(train_subset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(valid_subset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=2,
                                             pin_memory=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=2,
                                              pin_memory=True)

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lambda step: cosine_annealing(
            step,
            args.epochs * len(train_loader),
            1,  # since lr_lambda computes multiplicative factor
            1e-6 / args.lr))

    print('std training')
    train_acc = []
    ta = []

    if not os.path.exists(args.save_dir):
        os.mkdir(args.save_dir)

    for epoch in range(args.epochs):

        print(optimizer.state_dict()['param_groups'][0]['lr'])
        acc, loss = train(train_loader, model, criterion, optimizer, epoch,
                          scheduler)

        # evaluate on validation set
        tacc, tloss = validate(val_loader, model, criterion)

        train_acc.append(acc)
        ta.append(tacc)

        # remember best prec@1 and save checkpoint
        is_best = tacc > best_prec1
        best_prec1 = max(tacc, best_prec1)

        if is_best:

            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                },
                is_best,
                filename=os.path.join(args.save_dir, 'best_model.pt'))

        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
            },
            is_best,
            filename=os.path.join(args.save_dir, 'model.pt'))

        plt.plot(train_acc, label='train_acc')
        plt.plot(ta, label='TA')
        plt.legend()
        plt.savefig(os.path.join(args.save_dir, 'net_train.png'))
        plt.close()

    model_path = os.path.join(args.save_dir, 'best_model.pt')
    model.load_state_dict(torch.load(model_path)['state_dict'])
    print('testing result of ta best model')
    tacc, tloss = validate(test_loader, model, criterion)
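The `cosine_annealing` schedule passed to LambdaLR in Example #3 is also not shown. A common implementation matching the call `cosine_annealing(step, total_steps, 1, 1e-6 / args.lr)`, returning a multiplicative LR factor that follows a half-cosine from lr_max down to lr_min:

import numpy as np

def cosine_annealing(step, total_steps, lr_max, lr_min):
    # Factor decays from lr_max (1.0 here) to lr_min over total_steps
    # optimizer steps; LambdaLR multiplies the base LR by this value.
    return lr_min + (lr_max - lr_min) * 0.5 * (1 + np.cos(step / total_steps * np.pi))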