Code example #1
File: test_attention.py Project: rnsandeep/Attention
def load_attention_model(model_path, num_classes):
    model = AttnVGG(num_classes=num_classes,
                    attention=True,
                    normalize_attn=True)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['state_dict'])

    return model.to(device)
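A minimal usage sketch for the loader above (the checkpoint path and the random input are placeholders; `device` is defined here for self-containedness, and AttnVGG is assumed to return the logits plus two attention maps, as in the other examples on this page):

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = load_attention_model("checkpoint.pth", num_classes=2)  # hypothetical path
model.eval()
with torch.no_grad():
    logits, a1, a2 = model(torch.randn(1, 3, 224, 224).to(device))
    print(logits.argmax(dim=1))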
Code example #2
        for i in range(bs):
            # do per channel l_inf normalization
            for ci in range(self.ncInput):
                l_inf_channel = delta_im[i,
                                         ci, :, :].detach().cpu().abs().max()
                mag_in_scaled_c = self.mag_in / (255.0 * std[ci])
                delta_im[i, ci, :, :].data *= torch.tensor(
                    np.minimum(1.0, mag_in_scaled_c /
                               l_inf_channel)).float().cuda()

        return delta_im

if __name__ == "__main__":
    print("======>load pretrained models")
    net = AttnVGG(num_classes=2,
                  attention=True,
                  normalize_attn=True,
                  vis=False)
    # net = VGG(num_classes=2, gap=False)
    modelFile = "/home/lrh/git/libadver/examples/IPIM-AttnModel/models/checkpoint.pth"
    testCSVFile = "/home/lrh/git/libadver/examples/IPIM-AttnModel/test.csv"
    trainCSVFile = "/home/lrh/git/libadver/examples/IPIM-AttnModel/train.csv"

    checkpoint = torch.load(modelFile)
    net.load_state_dict(checkpoint['state_dict'])
    pretrained_clf = nn.DataParallel(net).cuda()
    pretrained_clf.eval()

    print("=======>load ISIC2016 dataset")
    mean = (0.7012, 0.5517, 0.4875)
    std = (0.0942, 0.1331, 0.1521)
    normalize = Normalize(mean, std)
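The per-channel l_inf normalization loop at the top of this example can also be written without Python loops; a vectorized sketch of the same clamping (an illustration under the same tensor shapes, not the original class method):

import torch

def clamp_linf_per_channel(delta_im, mag_in, std):
    # delta_im: (N, C, H, W) perturbation; std: per-channel normalization stds
    l_inf = delta_im.detach().abs().amax(dim=(2, 3), keepdim=True)   # (N, C, 1, 1)
    std = torch.as_tensor(std, dtype=delta_im.dtype, device=delta_im.device)
    budget = (mag_in / (255.0 * std)).view(1, -1, 1, 1)              # (1, C, 1, 1)
    # shrink each channel only where its max |delta| exceeds the budget
    scale = torch.clamp(budget / l_inf.clamp_min(1e-12), max=1.0)
    return delta_im * scale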
Code example #3
def main():
    # load data
    print('\nloading the dataset ...')
    assert opt.dataset == "ISIC2016" or opt.dataset == "ISIC2017"
    if opt.dataset == "ISIC2016":
        normalize = Normalize((0.7012, 0.5517, 0.4875),
                              (0.0942, 0.1331, 0.1521))
    elif opt.dataset == "ISIC2017":
        normalize = Normalize((0.6820, 0.5312, 0.4736),
                              (0.0840, 0.1140, 0.1282))
    transform_test = torch_transforms.Compose([
        RatioCenterCrop(0.8),
        Resize((256, 256)),
        CenterCrop((224, 224)),
        ToTensor(), normalize
    ])
    testset = ISIC(csv_file='test.csv', transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=64,
                                             shuffle=False,
                                             num_workers=8)
    print('done\n')

    # load network
    print('\nloading the model ...')
    if not opt.no_attention:
        print('turn on attention ...')
        if opt.normalize_attn:
            print('use softmax for attention map ...')
        else:
            print('use sigmoid for attention map ...')
    else:
        print('turn off attention ...')

    net = AttnVGG(num_classes=2,
                  attention=not opt.no_attention,
                  normalize_attn=opt.normalize_attn)
    # net = VGG(num_classes=2, gap=False)
    checkpoint = torch.load('checkpoint.pth')
    net.load_state_dict(checkpoint['state_dict'])
    model = nn.DataParallel(net, device_ids=device_ids).to(device)
    model.eval()
    print('done\n')

    # testing
    print('\nstart testing ...\n')
    writer = SummaryWriter(opt.outf)
    total = 0
    correct = 0
    with torch.no_grad():
        with open('test_results.csv', 'wt', newline='') as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=',')
            for i, data in enumerate(testloader, 0):
                images_test, labels_test = data['image'], data['label']
                images_test, labels_test = images_test.to(
                    device), labels_test.to(device)
                pred_test, __, __ = model(images_test)
                predict = torch.argmax(pred_test, 1)
                total += labels_test.size(0)
                correct += torch.eq(predict, labels_test).sum().double().item()
                # record test predicted responses
                responses = F.softmax(pred_test, dim=1).squeeze().cpu().numpy()
                responses = [responses[i] for i in range(responses.shape[0])]
                csv_writer.writerows(responses)
                # log images
                if opt.log_images:
                    I_test = utils.make_grid(images_test,
                                             nrow=8,
                                             normalize=True,
                                             scale_each=True)
                    writer.add_image('test/image', I_test, i)
                    # attention maps
                    if not opt.no_attention:
                        __, a1, a2 = model(images_test)
                        if a1 is not None:
                            attn1 = visualize_attn(
                                I_test,
                                a1,
                                up_factor=opt.base_up_factor,
                                nrow=8)
                            writer.add_image('test/attention_map_1', attn1, i)
                        if a2 is not None:
                            attn2 = visualize_attn(I_test,
                                                   a2,
                                                   up_factor=2 *
                                                   opt.base_up_factor,
                                                   nrow=8)
                            writer.add_image('test/attention_map_2', attn2, i)
    AP, AUC, precision_mean, precision_mel, recall_mean, recall_mel = compute_metrics(
        'test_results.csv', 'test.csv')
    print("\ntest result: accuracy %.2f%%" % (100 * correct / total))
    print(
        "\nmean precision %.2f%% mean recall %.2f%% \nprecision for mel %.2f%% recall for mel %.2f%%"
        % (100 * precision_mean, 100 * recall_mean, 100 * precision_mel,
           100 * recall_mel))
    print("\nAP %.4f AUC %.4f\n" % (AP, AUC))
Code example #4
File: train_seg.py Project: adoskk/IPMI2019-AttnMel
def main():
    # load data
    print('\nloading the dataset ...')
    assert opt.dataset == "ISIC2016" or opt.dataset == "ISIC2017"
    if opt.dataset == "ISIC2016":
        num_aug = 5
        normalize = Normalize((0.7012, 0.5517, 0.4875),
                              (0.0942, 0.1331, 0.1521))
    elif opt.dataset == "ISIC2017":
        num_aug = 2
        normalize = Normalize((0.6820, 0.5312, 0.4736),
                              (0.0840, 0.1140, 0.1282))
    if opt.over_sample:
        print('data is offline oversampled ...')
        train_file = 'train_oversample.csv'
    else:
        print('no offline oversampling ...')
        train_file = 'train.csv'
    im_size = 224
    transform_train = torch_transforms.Compose([
        RatioCenterCrop(0.8),
        Resize((256, 256)),
        RandomCrop((224, 224)),
        RandomRotate(),
        RandomHorizontalFlip(),
        RandomVerticalFlip(),
        ToTensor(), normalize
    ])
    transform_val = torch_transforms.Compose([
        RatioCenterCrop(0.8),
        Resize((256, 256)),
        CenterCrop((224, 224)),
        ToTensor(), normalize
    ])
    trainset = ISIC(csv_file=train_file, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=8,
        worker_init_fn=_worker_init_fn_(),
        drop_last=True)
    valset = ISIC(csv_file='val.csv', transform=transform_val)
    valloader = torch.utils.data.DataLoader(valset,
                                            batch_size=64,
                                            shuffle=False,
                                            num_workers=8)
    print('done\n')

    # load models
    print('\nloading the model ...')

    if not opt.no_attention:
        print('turn on attention ...')
        if opt.normalize_attn:
            print('use softmax for attention map ...')
        else:
            print('use sigmoid for attention map ...')
    else:
        print('turn off attention ...')

    net = AttnVGG(num_classes=2,
                  attention=not opt.no_attention,
                  normalize_attn=opt.normalize_attn)
    dice = DiceLoss()
    if opt.focal_loss:
        print('use focal loss ...')
        criterion = FocalLoss(gama=2., size_average=True, weight=None)
    else:
        print('use cross entropy loss ...')
        criterion = nn.CrossEntropyLoss()
    print('done\n')

    # move to GPU
    print('\nmoving models to GPU ...')
    model = nn.DataParallel(net, device_ids=device_ids).to(device)
    criterion.to(device)
    dice.to(device)
    print('done\n')

    # optimizer
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=0.9,
                          weight_decay=1e-4,
                          nesterov=True)
    lr_lambda = lambda epoch: np.power(0.1, epoch // 10)
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    # training
    print('\nstart training ...\n')
    step = 0
    EMA_accuracy = 0
    AUC_val = 0
    writer = SummaryWriter(opt.outf)
    if opt.log_images:
        data_iter = iter(valloader)
        fixed_batch = next(data_iter)
        fixed_batch = fixed_batch['image'][0:16, :, :, :].to(device)
    for epoch in range(opt.epochs):
        torch.cuda.empty_cache()
        # adjust learning rate
        scheduler.step()
        current_lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('train/learning_rate', current_lr, epoch)
        print("\nepoch %d learning rate %f\n" % (epoch + 1, current_lr))
        # run for one epoch
        for aug in range(num_aug):
            for i, data in enumerate(trainloader, 0):
                # warm up
                model.train()
                model.zero_grad()
                optimizer.zero_grad()
                inputs, seg, labels = data['image'], data['image_seg'], data[
                    'label']
                seg = seg[:, -1:, :, :]
                seg_1 = F.adaptive_avg_pool2d(seg,
                                              im_size // opt.base_up_factor)
                seg_2 = F.adaptive_avg_pool2d(
                    seg, im_size // opt.base_up_factor // 2)
                inputs, seg_1, seg_2, labels = inputs.to(device), seg_1.to(
                    device), seg_2.to(device), labels.to(device)
                # forward
                pred, a1, a2 = model(inputs)
                # backward
                loss_c = criterion(pred, labels)
                loss_seg1 = dice(a1, seg_1)
                loss_seg2 = dice(a2, seg_2)
                loss = loss_c + 0.001 * loss_seg1 + 0.01 * loss_seg2
                loss.backward()
                optimizer.step()
                # display results
                if i % 10 == 0:
                    model.eval()
                    pred, __, __ = model(inputs)
                    predict = torch.argmax(pred, 1)
                    total = labels.size(0)
                    correct = torch.eq(predict, labels).sum().double().item()
                    accuracy = correct / total
                    EMA_accuracy = 0.9 * EMA_accuracy + 0.1 * accuracy
                    writer.add_scalar('train/loss_c', loss_c.item(), step)
                    writer.add_scalar('train/loss_seg1', loss_seg1.item(),
                                      step)
                    writer.add_scalar('train/loss_seg2', loss_seg2.item(),
                                      step)
                    writer.add_scalar('train/accuracy', accuracy, step)
                    writer.add_scalar('train/EMA_accuracy', EMA_accuracy, step)
                    print(
                        "[epoch %d][aug %d/%d][iter %d/%d] loss_c %.4f loss_seg1 %.4f loss_seg2 %.4f accuracy %.2f%% EMA %.2f%%"
                        %
                        (epoch + 1, aug + 1, num_aug, i + 1, len(trainloader),
                         loss_c.item(), loss_seg1.item(), loss_seg2.item(),
                         (100 * accuracy), (100 * EMA_accuracy)))
                step += 1
        # the end of each epoch - validation results
        model.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            with open('val_results.csv', 'wt', newline='') as csv_file:
                csv_writer = csv.writer(csv_file, delimiter=',')
                for i, data in enumerate(valloader, 0):
                    images_val, labels_val = data['image'], data['label']
                    images_val, labels_val = images_val.to(
                        device), labels_val.to(device)
                    pred_val, __, __ = model(images_val)
                    predict = torch.argmax(pred_val, 1)
                    total += labels_val.size(0)
                    correct += torch.eq(predict,
                                        labels_val).sum().double().item()
                    # record predictions
                    responses = F.softmax(pred_val,
                                          dim=1).squeeze().cpu().numpy()
                    responses = [
                        responses[i] for i in range(responses.shape[0])
                    ]
                    csv_writer.writerows(responses)
            AP, AUC, precision_mean, precision_mel, recall_mean, recall_mel = compute_metrics(
                'val_results.csv', 'val.csv')
            # save checkpoints
            print('\nsaving checkpoints ...\n')
            checkpoint = {
                'state_dict': model.module.state_dict(),
                'opt_state_dict': optimizer.state_dict(),
            }
            torch.save(checkpoint,
                       os.path.join(opt.outf, 'checkpoint_latest.pth'))
            if AUC > AUC_val:  # save optimal validation model
                torch.save(checkpoint, os.path.join(opt.outf,
                                                    'checkpoint.pth'))
                AUC_val = AUC
            # log scalars
            writer.add_scalar('val/accuracy', correct / total, epoch)
            writer.add_scalar('val/mean_precision', precision_mean, epoch)
            writer.add_scalar('val/mean_recall', recall_mean, epoch)
            writer.add_scalar('val/precision_mel', precision_mel, epoch)
            writer.add_scalar('val/recall_mel', recall_mel, epoch)
            writer.add_scalar('val/AP', AP, epoch)
            writer.add_scalar('val/AUC', AUC, epoch)
            print("\n[epoch %d] val result: accuracy %.2f%%" %
                  (epoch + 1, 100 * correct / total))
            print(
                "\nmean precision %.2f%% mean recall %.2f%% \nprecision for mel %.2f%% recall for mel %.2f%%"
                % (100 * precision_mean, 100 * recall_mean,
                   100 * precision_mel, 100 * recall_mel))
            print("\nAP %.4f AUC %.4f\n optimal AUC: %.4f" %
                  (AP, AUC, AUC_val))
            # log images
            if opt.log_images:
                print('\nlog images ...\n')
                I_train = utils.make_grid(inputs[0:16, :, :, :],
                                          nrow=4,
                                          normalize=True,
                                          scale_each=True)
                I_seg_1 = utils.make_grid(seg_1[0:16, :, :, :],
                                          nrow=4,
                                          normalize=True,
                                          scale_each=True)
                I_seg_2 = utils.make_grid(seg_2[0:16, :, :, :],
                                          nrow=4,
                                          normalize=True,
                                          scale_each=True)
                writer.add_image('train/image', I_train, epoch)
                writer.add_image('train/seg1', I_seg_1, epoch)
                writer.add_image('train/seg2', I_seg_2, epoch)
                if epoch == 0:
                    I_val = utils.make_grid(fixed_batch,
                                            nrow=4,
                                            normalize=True,
                                            scale_each=True)
                    writer.add_image('val/image', I_val, epoch)
            if opt.log_images and (not opt.no_attention):
                print('\nlog attention maps ...\n')
                # training data
                __, a1, a2 = model(inputs[0:16, :, :, :])
                if a1 is not None:
                    attn1 = visualize_attn(I_train,
                                           a1,
                                           up_factor=opt.base_up_factor,
                                           nrow=4)
                    writer.add_image('train/attention_map_1', attn1, epoch)
                if a2 is not None:
                    attn2 = visualize_attn(I_train,
                                           a2,
                                           up_factor=2 * opt.base_up_factor,
                                           nrow=4)
                    writer.add_image('train/attention_map_2', attn2, epoch)
                # val data
                __, a1, a2 = model(fixed_batch)
                if a1 is not None:
                    attn1 = visualize_attn(I_val,
                                           a1,
                                           up_factor=opt.base_up_factor,
                                           nrow=4)
                    writer.add_image('val/attention_map_1', attn1, epoch)
                if a2 is not None:
                    attn2 = visualize_attn(I_val,
                                           a2,
                                           up_factor=2 * opt.base_up_factor,
                                           nrow=4)
                    writer.add_image('val/attention_map_2', attn2, epoch)
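DiceLoss above is the project's own loss and is applied to the attention maps a1/a2 against downsampled segmentation masks. For reference, a generic soft-Dice formulation (an assumption; the project's implementation may differ in smoothing and reduction):

import torch
import torch.nn as nn

class SoftDiceLoss(nn.Module):
    def __init__(self, smooth=1.0):
        super().__init__()
        self.smooth = smooth

    def forward(self, pred, target):
        # pred and target: (N, 1, H, W), values in [0, 1]
        pred = pred.flatten(1)
        target = target.flatten(1)
        intersection = (pred * target).sum(dim=1)
        union = pred.sum(dim=1) + target.sum(dim=1)
        dice = (2.0 * intersection + self.smooth) / (union + self.smooth)
        return 1.0 - dice.mean()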
Code example #5
trainCSVFile = '/home/lrh/git/Evaluating_Robustness_Of_Deep_Medical_Models/Dermothsis/train.csv'
trainset = ISIC(csv_file=trainCSVFile, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=16,
                                          shuffle=True,
                                          num_workers=8,
                                          drop_last=True)

testCSVFile = '/home/lrh/git/Evaluating_Robustness_Of_Deep_Medical_Models/Dermothsis/test.csv'
testset = ISIC(csv_file=testCSVFile, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False)
print('\ndone')

print('======>loading the model')

net = AttnVGG(num_classes=2, attention=True, normalize_attn=False)
criterion = FocalLoss()
print('done\n')

print('\nmoving models to GPU')
clf = net.cuda()
clf = torch.nn.DataParallel(clf)
cudnn.benchmark = True
criterion = criterion.cuda()
print('done\n')

learningRate = 0.001
optimizer = optim.SGD(clf.parameters(),
                      lr=learningRate,
                      momentum=0.9,
                      weight_decay=1e-4,
                      nesterov=True)  # excerpt is cut off here; closing argument assumed to match the other training scripts
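FocalLoss here (and FocalLoss(gama=2., size_average=True, weight=None) in the training scripts above) is the project's own class; a generic focal-loss sketch for reference (an assumption about the formulation, not the project's exact code):

import torch.nn as nn
import torch.nn.functional as F

class GenericFocalLoss(nn.Module):
    def __init__(self, gamma=2.0, weight=None, size_average=True):
        super().__init__()
        self.gamma = gamma
        self.weight = weight
        self.size_average = size_average

    def forward(self, logits, targets):
        # focal loss: scale per-sample cross entropy by (1 - p_t) ** gamma
        log_p = F.log_softmax(logits, dim=1)
        ce = F.nll_loss(log_p, targets, weight=self.weight, reduction='none')
        p_t = log_p.gather(1, targets.unsqueeze(1)).squeeze(1).exp()
        loss = (1.0 - p_t) ** self.gamma * ce
        return loss.mean() if self.size_average else loss.sum()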
Code example #6
File: train.py Project: brianlee99/CovidVGG
def main():
    # load data
    print('\nloading the dataset ...')

    train_results = pd.DataFrame(columns=[
        'EMA_accuracy',
        'accuracy',
        'learning_rate',
        'loss_c', 'epoch'
    ])
    val_results = pd.DataFrame(columns=[
        'AP', 'AUC', 'accuracy', 'mean_precision', 'mean_recall', 'precision_mel', 'sensitivity', 'specificity', 'epoch'
    ])

    # num_aug reduced from the original 5 augmentation passes
    num_aug = 3
    normalize = Normalize((0.5500, 0.5506, 0.5520), (0.1788, 0.1786, 0.1787))

    if opt.over_sample:
        print('data is offline oversampled ...')
        train_file = 'train_oversample.csv'
    else:
        print('no offline oversampling ...')
        train_file = 'train.csv'
    transform_train = torch_transforms.Compose([
         RatioCenterCrop(0.8),
         Resize((256,256)),
         RandomCrop((224,224)),
         RandomRotate(),
         RandomTranslation(),
         RandomHorizontalFlip(),
         RandomAdjustContrast(),
         ToTensor(),
         normalize
    ])
    transform_val = torch_transforms.Compose([
         RatioCenterCrop(0.8),
         Resize((256,256)),
         CenterCrop((224,224)),
         ToTensor(),
         normalize
    ])
    trainset = ISIC(csv_file=train_file, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.batch_size, shuffle=True,
        num_workers=8, worker_init_fn=_worker_init_fn_(), drop_last=True)
    valset = ISIC(csv_file='val.csv', transform=transform_val)
    valloader = torch.utils.data.DataLoader(valset, batch_size=64, shuffle=False, num_workers=8)
    print('done\n')

    # load models
    print('\nloading the model ...')

    if not opt.no_attention:
        print('turn on attention ...')
        if opt.normalize_attn:
            print('use softmax for attention map ...')
        else:
            print('use sigmoid for attention map ...')
    else:
        print('turn off attention ...')

    net = AttnVGG(num_classes=2, attention=not opt.no_attention, normalize_attn=opt.normalize_attn)
    # net = VGG(num_classes=2, gap=False)

    if opt.focal_loss:
        print('use focal loss ...')
        criterion = FocalLoss(gama=2., size_average=True, weight=None)
    else:
        print('use cross entropy loss ...')
        criterion = nn.CrossEntropyLoss()
    print('done\n')

    # move to GPU
    print('\nmoving models to GPU ...')
    model = nn.DataParallel(net, device_ids=device_ids).to(device)
    criterion.to(device)
    print('done\n')

    # optimizer
    optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)
    lr_lambda = lambda epoch : np.power(0.1, epoch//10)
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    # training
    print('\nstart training ...\n')
    step = 0
    EMA_accuracy = 0
    AUC_val = 0
    writer = SummaryWriter('logs/exp1')
    if opt.log_images:
        data_iter = iter(valloader)
        fixed_batch = next(data_iter)
        fixed_batch = fixed_batch['image'][0:16,:,:,:].to(device)

    for epoch in range(opt.epochs):
        torch.cuda.empty_cache()
        train_row = {'EMA_accuracy': 0, 'accuracy': 0, 'learning_rate': 0, 'loss_c': 0, 'epoch': epoch+1}
        val_row = {'AP': 0, 'AUC': 0, 'accuracy': 0, 'mean_precision': 0, 'mean_recall': 0, 'precision_mel': 0, 'sensitivity': 0, 'specificity': 0, 'epoch': epoch+1}

        current_lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('train/learning_rate', current_lr, epoch)
        train_row['learning_rate'] = current_lr
        print("\nepoch %d learning rate %f\n" % (epoch+1, current_lr))
        # run for one epoch
        for aug in range(num_aug):
            # Added this line
            torch.cuda.empty_cache()
            for i, data in enumerate(trainloader, 0):
                torch.cuda.empty_cache()
                # warm up
                model.train()
                model.zero_grad()
                optimizer.zero_grad()
                inputs, labels = data['image'], data['label']
                inputs, labels = inputs.to(device), labels.to(device)
                # forward
                pred, __, __ = model(inputs)
                # backward
                loss = criterion(pred, labels)
                loss.backward()
                optimizer.step()
                # display results
                model.eval()
                pred, __, __ = model(inputs)
                predict = torch.argmax(pred, 1)
                total = labels.size(0)
                correct = torch.eq(predict, labels).sum().double().item()
                accuracy = correct / total
                EMA_accuracy = 0.9*EMA_accuracy + 0.1*accuracy
                writer.add_scalar('train/loss_c', loss.item(), step)
                writer.add_scalar('train/accuracy', accuracy, step)
                writer.add_scalar('train/EMA_accuracy', EMA_accuracy, step)
                train_row['loss_c'] = loss.item()
                train_row['accuracy'] = accuracy
                train_row['EMA_accuracy'] = EMA_accuracy
                print("[epoch %d][aug %d/%d][iter %d/%d] loss %.4f accuracy %.2f%% EMA_accuracy %.2f%%"
                    % (epoch+1, aug+1, num_aug, i+1, len(trainloader), loss.item(), (100*accuracy), (100*EMA_accuracy)))
                step += 1
        # adjust learning rate
        scheduler.step()
        # the end of each epoch - validation results
        model.eval()
        total = 0
        correct = 0
        with torch.no_grad():
            with open('val_results.csv', 'wt', newline='') as csv_file:
                csv_writer = csv.writer(csv_file, delimiter=',')
                for i, data in enumerate(valloader, 0):
                    images_val, labels_val = data['image'], data['label']
                    images_val, labels_val = images_val.to(device), labels_val.to(device)
                    pred_val, __, __ = model(images_val)
                    predict = torch.argmax(pred_val, 1)
                    total += labels_val.size(0)
                    correct += torch.eq(predict, labels_val).sum().double().item()
                    # record prediction
                    responses = F.softmax(pred_val, dim=1).squeeze().cpu().numpy()
                    responses = [responses[i] for i in range(responses.shape[0])]
                    csv_writer.writerows(responses)
            AP, AUC, precision_mean, precision_mel, recall_mean, sensitivity, specificity = compute_metrics('val_results.csv', 'val.csv')
            # save checkpoints
            print('\nsaving checkpoints ...\n')
            checkpoint = {
                'state_dict': model.module.state_dict(),
                'opt_state_dict': optimizer.state_dict(),
            }
            torch.save(checkpoint, os.path.join(opt.outf, 'checkpoint_latest.pth'))
            if AUC > AUC_val: # save optimal validation model
                torch.save(checkpoint, os.path.join(opt.outf,'checkpoint.pth'))
                AUC_val = AUC
            # log scalars
            accuracy = correct/total
            writer.add_scalar('val/accuracy', accuracy, epoch)
            writer.add_scalar('val/mean_precision', precision_mean, epoch)
            writer.add_scalar('val/mean_recall', recall_mean, epoch)
            writer.add_scalar('val/precision_mel', precision_mel, epoch)
            writer.add_scalar('val/sensitivity', sensitivity, epoch)
            writer.add_scalar('val/specificity', specificity, epoch)
            writer.add_scalar('val/AP', AP, epoch)
            writer.add_scalar('val/AUC', AUC, epoch)
            val_row['accuracy'] = accuracy
            val_row['mean_precision'] = precision_mean
            val_row['mean_recall'] = recall_mean
            val_row['precision_mel'] = precision_mel
            val_row['sensitivity'] = sensitivity
            val_row['specificity'] = specificity
            val_row['AP'] = AP
            val_row['AUC'] = AUC
            print("\n[epoch %d] val result: accuracy %.2f%%" % (epoch+1, 100*correct/total))
            print("\nmean precision %.2f%% mean recall %.2f%% \nprecision for mel %.2f%% recall for mel %.2f%% recall for normal %.2f%%" %
                    (100*precision_mean, 100*recall_mean, 100*precision_mel, 100*sensitivity, 100*specificity))
            print("\nAP %.4f AUC %.4f optimal AUC: %.4f\n" % (AP, AUC, AUC_val))
            # log images
            if opt.log_images:
                print('\nlog images ...\n')
                I_train = utils.make_grid(inputs[0:16,:,:,:], nrow=4, normalize=True, scale_each=True)
                writer.add_image('train/image', I_train, epoch)
                if epoch == 0:
                    I_val = utils.make_grid(fixed_batch, nrow=4, normalize=True, scale_each=True)
                    writer.add_image('val/image', I_val, epoch)
            if opt.log_images and (not opt.no_attention):
                print('\nlog attention maps ...\n')
                # training data
                __, a1, a2 = model(inputs[0:16,:,:,:])
                if a1 is not None:
                    attn1 = visualize_attn(I_train, a1, up_factor=opt.base_up_factor, nrow=4)
                    writer.add_image('train/attention_map_1', attn1, epoch)
                if a2 is not None:
                    attn2 = visualize_attn(I_train, a2, up_factor=2*opt.base_up_factor, nrow=4)
                    writer.add_image('train/attention_map_2', attn2, epoch)
                # val data
                __, a1, a2 = model(fixed_batch)
                if a1 is not None:
                    attn1 = visualize_attn(I_val, a1, up_factor=opt.base_up_factor, nrow=4)
                    writer.add_image('val/attention_map_1', attn1, epoch)
                if a2 is not None:
                    attn2 = visualize_attn(I_val, a2, up_factor=2*opt.base_up_factor, nrow=4)
                    writer.add_image('val/attention_map_2', attn2, epoch)
        # Append the row to the dataframes
        train_results = train_results.append(train_row, ignore_index=True)
        val_results = val_results.append(val_row, ignore_index=True)
    # write to csvs
    train_results.to_csv('train_scores.csv')
    val_results.to_csv('val_scores.csv')
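Note that train_results.append / val_results.append rely on pandas.DataFrame.append, which was deprecated in pandas 1.4 and removed in 2.0; on current pandas the same row-appending step can be written with pd.concat (a drop-in adjustment, not part of the original script):

import pandas as pd

train_results = pd.concat([train_results, pd.DataFrame([train_row])], ignore_index=True)
val_results = pd.concat([val_results, pd.DataFrame([val_row])], ignore_index=True)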
Code example #7
#weights = make_weights_for_balanced_classes(image_datasets['train'].imgs, len(image_datasets['train'].classes))
#weights = torch.DoubleTensor(weights)
#sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

dataloaders = {
    x: torch.utils.data.DataLoader(image_datasets[x],
                                   batch_size=BATCH_SIZE,
                                   shuffle=True,
                                   num_workers=16)
    for x in ['train']
}

num_classes = int(sys.argv[5])  # no of classes
no_of_epochs = int(sys.argv[6])  # no of epochs to train

net = AttnVGG(num_classes=num_classes, attention=True, normalize_attn=True)
#net = VGG(num_classes=num_classes)
net = net.to(device)

model = net  # nn.DataParallel(net, device_ids=device_ids).to(device)
dataset_sizes = {x: len(image_datasets[x]) for x in ['train']}
class_names = image_datasets['train'].classes

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

lr_lambda = lambda epoch: np.power(0.1, epoch // 10)
scheduler = lr_scheduler.LambdaLR(optimizer_ft, lr_lambda=lr_lambda)
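This snippet attaches both a StepLR and a LambdaLR to the same optimizer; only the scheduler that is actually stepped during training takes effect. For reference, the lr_lambda above scales the base learning rate by a factor of 0.1 every 10 epochs:

import numpy as np

for epoch in (0, 9, 10, 19, 20, 30):
    # with the base lr of 0.001 set for optimizer_ft above
    print(epoch, 0.001 * np.power(0.1, epoch // 10))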
Code example #8
def main():
    modelFile = "/home/lrh/store/modelpath/adversarial_defense/Dermothsis/adv_training.pth"
    testCSVFile = "/home/lrh/git/libadver/examples/IPIM-AttnModel/test.csv"
    print("======>load pretrained models")
    net = AttnVGG(num_classes=2,
                  attention=True,
                  normalize_attn=False,
                  vis=False)
    # net = VGG(num_classes=2, gap=False)
    checkpoint = torch.load(modelFile)
    net.load_state_dict(checkpoint['state_dict'])
    pretrained_clf = nn.DataParallel(net).cuda()
    pretrained_clf.eval()

    print("=======>load ISIC2016 dataset")
    normalize = Normalize((0.7012, 0.5517, 0.4875), (0.0942, 0.1331, 0.1521))
    transform_test = torch_transforms.Compose([
        RatioCenterCrop(0.8),
        Resize((256, 256)),
        CenterCrop((224, 224)),
        ToTensor(), normalize
    ])
    testset = ISIC(csv_file=testCSVFile, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=8,
                                             shuffle=False,
                                             num_workers=4)

    PGDAttack = attack.ProjectGradientDescent(model=pretrained_clf)

    gt = torch.FloatTensor()
    pred = torch.FloatTensor()
    pred_advx = torch.FloatTensor()

    for i, data in enumerate(testloader, 0):
        print(i)
        images_test, labels_test = data['image'], data['label']
        images_test = images_test.cuda()
        labels_test = labels_test.cuda()

        pgd_params['y'] = labels_test
        pgd_params['clip_max'] = torch.max(images_test)
        pgd_params['clip_min'] = torch.min(images_test)
        adv_x = PGDAttack.generate(images_test, **pgd_params)
        #        torchvision.utils.save_image(adv_x, 'pgd_image_show'+'/adv{}.jpg'.format(i), nrow = 50 ,normalize = True)

        outputs_x, _, _ = pretrained_clf(images_test)
        x_pred = torch.argmax(outputs_x, dim=1).float()
        outputs_advx, _, _ = pretrained_clf(adv_x)
        adv_pred = torch.argmax(outputs_advx, dim=1).float()
        labels_test = labels_test.float()
        gt = torch.cat((gt, labels_test.detach().cpu()), 0)
        pred = torch.cat((pred, x_pred.detach().cpu()), 0)
        pred_advx = torch.cat((pred_advx, adv_pred.detach().cpu()), 0)

    fpr, tpr, thresholds = roc_curve(gt, pred_advx)

    AUC_ROC = roc_auc_score(gt, pred_advx)
    # test_integral = np.trapz(tpr,fpr) #trapz is numpy integration
    print("\nArea under the ROC curve: " + str(AUC_ROC))
    # ROC_curve =plt.figure()
    # plt.plot(fpr,tpr,'-',label='Area Under the Curve (AUC = %0.4f)' % AUC_ROC)
    # plt.title('ROC curve')
    # plt.xlabel("FPR (False Positive Rate)")
    # plt.ylabel("TPR (True Positive Rate)")
    # plt.legend(loc="lower right")
    # plt.savefig("pgd_image_show/ROC.png")

    correct_acc = 0
    correct_fr = 0
    correct_acc = correct_acc + gt.eq(pred_advx).sum()
    correct_fr = correct_fr + pred.eq(pred_advx).sum()
    total = len(gt)

    print("adv_ACC: %.8f" % (float(correct_acc) / total))
    print("FR: %.8f" % (1 - float(correct_fr) / total))