def main(args):
    """Train a WideResNet-16-8 via train_model and write per-epoch
    train/validation metrics to 'logs.csv'."""
    net = WideResNet(depth=16, widen_factor=8, drop_rate=0.4)

    (train_losses, train_accs,
     val_accs, val_losses) = train_model(net, args)

    # One CSV row per epoch; newline='' per the csv module's requirement.
    with open('logs.csv', 'w', newline='') as fh:
        columns = ['epoch', 'train_accuracy', 'train_loss',
                   'val_accuracy', 'val_loss']
        log_writer = csv.DictWriter(fh, fieldnames=columns)

        log_writer.writeheader()
        for ep in range(args.epochs):
            log_writer.writerow({
                'epoch': str(ep),
                'train_accuracy': str(train_accs[ep]),
                'train_loss': str(train_losses[ep]),
                'val_accuracy': str(val_accs[ep]),
                'val_loss': str(val_losses[ep]),
            })
Ejemplo n.º 2
0
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=32)

# Evaluation loader: fixed order; pinned memory speeds host->GPU copies.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=10)

# Model selection: WRN-16-8 (dropout 0.4) for SVHN, WRN-28-10 (0.3) otherwise.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=0.3)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
# SGD with Nesterov momentum and 5e-4 weight decay.
cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.learning_rate,
                                momentum=0.9,
                                nesterov=True,
                                weight_decay=5e-4)
Ejemplo n.º 3
0
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Dropout rate comes from the command line (args.drop) in this variant;
# SVHN uses a narrower WRN-16-4.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes, drop=args.drop)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=4,
                         dropRate=args.drop)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=args.drop)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
# SGD with Nesterov momentum and 5e-4 weight decay.
cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.learning_rate,
                                momentum=0.9,
                                nesterov=True,
                                weight_decay=5e-4)
Ejemplo n.º 4
0
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model selection: WRN-16-8 (dropout 0.4) for SVHN, WRN-28-10 (0.3) otherwise.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16, num_classes=num_classes, widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10,
                         dropRate=0.3)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
cnn_optimizer = torch.optim.SGD(cnn.parameters(), lr=args.learning_rate,
                                momentum=0.9, nesterov=True, weight_decay=5e-4)

# LR schedule: SVHN decays 10x at epochs 80/120; otherwise 5x at 60/120/160.
if args.dataset == 'svhn':
    scheduler = MultiStepLR(cnn_optimizer, milestones=[80, 120], gamma=0.1)
else:
    scheduler = MultiStepLR(cnn_optimizer, milestones=[60, 120, 160], gamma=0.2)
if not os.path.exists(args.output_dir):
Ejemplo n.º 5
0
def run_cutout(dataset="cifar10",
               model="resnet18",
               epochs=200,
               batch_size=128,
               learning_rate=0.1,
               data_augmentation=False,
               cutout=False,
               n_holes=1,
               length=8,
               no_cuda=False,
               seed=0):
    """Train a CIFAR-10/100 or SVHN classifier, optionally with Cutout
    augmentation, logging per-epoch accuracy to CSV.

    Returns a dict with keys 'epoch', 'train_error' and 'test_error'
    (used as the validation signal for hyperband).

    Modernized from PyTorch <= 0.3 idioms: `.cuda(async=True)` is a
    SyntaxError on Python >= 3.7 (`async` is a keyword) and is replaced by
    `non_blocking=True`; `Variable(..., volatile=True)` is replaced by
    `torch.no_grad()`; `loss.data[0]` by `loss.item()`.
    """
    cuda = not no_cuda and torch.cuda.is_available()
    cudnn.benchmark = True  # autotune conv kernels; helps with fixed input sizes

    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    test_id = dataset + '_' + model

    # Image preprocessing: per-channel mean/std normalization (values scaled
    # from [0, 255] pixel statistics to [0, 1]).
    if dataset == 'svhn':
        normalize = transforms.Normalize(
            mean=[x / 255.0 for x in [109.9, 109.7, 113.8]],
            std=[x / 255.0 for x in [50.1, 50.6, 50.8]])
    else:
        normalize = transforms.Normalize(
            mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
            std=[x / 255.0 for x in [63.0, 62.1, 66.7]])

    train_transform = transforms.Compose([])
    if data_augmentation:
        train_transform.transforms.append(transforms.RandomCrop(32, padding=4))
        train_transform.transforms.append(transforms.RandomHorizontalFlip())
    train_transform.transforms.append(transforms.ToTensor())
    train_transform.transforms.append(normalize)
    if cutout:
        # Cutout is appended last, so it operates on the normalized tensor.
        train_transform.transforms.append(
            Cutout(n_holes=n_holes, length=length))

    test_transform = transforms.Compose([transforms.ToTensor(), normalize])

    if dataset == 'cifar10':
        num_classes = 10
        train_dataset = datasets.CIFAR10(root='data/',
                                         train=True,
                                         transform=train_transform,
                                         download=True)

        test_dataset = datasets.CIFAR10(root='data/',
                                        train=False,
                                        transform=test_transform,
                                        download=True)
    elif dataset == 'cifar100':
        num_classes = 100
        train_dataset = datasets.CIFAR100(root='data/',
                                          train=True,
                                          transform=train_transform,
                                          download=True)

        test_dataset = datasets.CIFAR100(root='data/',
                                         train=False,
                                         transform=test_transform,
                                         download=True)
    elif dataset == 'svhn':
        num_classes = 10
        train_dataset = datasets.SVHN(root='data/',
                                      split='train',
                                      transform=train_transform,
                                      download=True)

        extra_dataset = datasets.SVHN(root='data/',
                                      split='extra',
                                      transform=train_transform,
                                      download=True)

        # Combine both training splits (https://arxiv.org/pdf/1605.07146.pdf)
        data = np.concatenate([train_dataset.data, extra_dataset.data], axis=0)
        labels = np.concatenate([train_dataset.labels, extra_dataset.labels],
                                axis=0)
        train_dataset.data = data
        train_dataset.labels = labels

        test_dataset = datasets.SVHN(root='data/',
                                     split='test',
                                     transform=test_transform,
                                     download=True)

    # Data Loader (Input Pipeline)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               pin_memory=True,
                                               num_workers=2)

    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=batch_size,
                                              shuffle=False,
                                              pin_memory=True,
                                              num_workers=2)

    # Model: WRN-16-8 (dropout 0.4) for SVHN, WRN-28-10 (0.3) otherwise.
    if model == 'resnet18':
        cnn = ResNet18(num_classes=num_classes)
    elif model == 'wideresnet':
        if dataset == 'svhn':
            cnn = WideResNet(depth=16,
                             num_classes=num_classes,
                             widen_factor=8,
                             dropRate=0.4)
        else:
            cnn = WideResNet(depth=28,
                             num_classes=num_classes,
                             widen_factor=10,
                             dropRate=0.3)

    # NOTE(review): .cuda() is unconditional even when the `cuda` flag is
    # False, so a GPU is effectively required — confirm intended.
    cnn = cnn.cuda()
    criterion = nn.CrossEntropyLoss().cuda()

    cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                    lr=learning_rate,
                                    momentum=0.9,
                                    nesterov=True,
                                    weight_decay=5e-4)

    # LR schedule: SVHN decays 10x at 80/120; otherwise 5x at 60/120/160.
    if dataset == 'svhn':
        scheduler = MultiStepLR(cnn_optimizer, milestones=[80, 120], gamma=0.1)
    else:
        scheduler = MultiStepLR(cnn_optimizer,
                                milestones=[60, 120, 160],
                                gamma=0.2)

    #TODO: change path to relative path
    filename = "/beegfs/work/workspace/ws/fr_mn119-augment-0/logs/{}.csv".format(
        test_id)
    # filename = 'logs/' + test_id + '.csv'

    # Bundle the call arguments into a Namespace so CSVLogger can record
    # the experiment configuration alongside the metrics.
    args = argparse.Namespace(
        **{
            "dataset": dataset,
            "model": model,
            "epochs": epochs,
            "batch_size": batch_size,
            "learning_rate": learning_rate,
            "data_augmentation": data_augmentation,
            "cutout": cutout,
            "n_holes": n_holes,
            "length": length,
            "no_cuda": no_cuda,
            "seed": seed
        })

    csv_logger = CSVLogger(args=args,
                           fieldnames=['epoch', 'train_acc', 'test_acc'],
                           filename=filename)

    def test(loader):
        """Return accuracy of `cnn` on `loader`; restores train mode."""
        cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
        correct = 0.
        total = 0.
        # no_grad replaces the removed Variable(..., volatile=True) idiom.
        with torch.no_grad():
            for images, labels in loader:
                if dataset == 'svhn':
                    # SVHN labels are from 1 to 10, not 0 to 9, so subtract 1.
                    # NOTE(review): newer torchvision SVHN already yields
                    # 0..9 — verify against the installed version.
                    labels = labels.type_as(torch.LongTensor()).view(-1) - 1

                images = images.cuda()
                labels = labels.cuda()

                pred = cnn(images)

                pred = torch.max(pred.data, 1)[1]
                total += labels.size(0)
                # .item() keeps `correct` a Python float, not a tensor.
                correct += (pred == labels).sum().item()

        val_acc = correct / total
        cnn.train()
        return val_acc

    for epoch in range(epochs):

        xentropy_loss_avg = 0.
        correct = 0.
        total = 0.

        progress_bar = tqdm(train_loader)
        for i, (images, labels) in enumerate(progress_bar):
            progress_bar.set_description('Epoch ' + str(epoch))

            if dataset == 'svhn':
                # SVHN labels are from 1 to 10, not 0 to 9, so subtract 1
                labels = labels.type_as(torch.LongTensor()).view(-1) - 1

            # non_blocking overlaps the copy with compute (needs pin_memory).
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            cnn.zero_grad()
            pred = cnn(images)

            xentropy_loss = criterion(pred, labels)
            xentropy_loss.backward()
            cnn_optimizer.step()

            # .item() replaces the removed `.data[0]` accessor.
            xentropy_loss_avg += xentropy_loss.item()

            # Calculate running average of accuracy
            _, pred = torch.max(pred.data, 1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()
            accuracy = correct / total

            progress_bar.set_postfix(xentropy='%.3f' % (xentropy_loss_avg /
                                                        (i + 1)),
                                     acc='%.3f' % accuracy)

        test_acc = test(test_loader)
        tqdm.write('test_acc: %.3f' % (test_acc))

        # step() once per epoch; the deprecated step(epoch) form gives the
        # same MultiStepLR behavior when epochs advance sequentially.
        scheduler.step()

        row = {
            'epoch': str(epoch),
            'train_acc': str(accuracy),
            'test_acc': str(test_acc)
        }
        csv_logger.writerow(row)

    # torch.save(cnn.state_dict(), 'checkpoints/' + test_id + '.pt')
    csv_logger.close()

    results = {
        'epoch': epoch,
        'train_error': 1 - accuracy,
        'test_error': 1 - test_acc
    }

    # validation error for hyperband
    return results
Ejemplo n.º 6
0
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model selection: WRN-16-8 (dropout 0.4) for SVHN, WRN-28-10 (0.3) otherwise.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16, num_classes=num_classes, widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10,
                         dropRate=0.3)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()

# This variant swaps the usual Nesterov SGD (kept commented below) for Adam
# with the same 5e-4 weight decay.
#cnn_optimizer = torch.optim.SGD(cnn.parameters(), lr=args.learning_rate,
#                                momentum=0.9, nesterov=True, weight_decay=5e-4)

cnn_optimizer = torch.optim.Adam(cnn.parameters(), lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=5e-4, amsgrad=False)

if args.dataset == 'svhn':
    #scheduler = MultiStepLR(cnn_optimizer, milestones=[80, 120], gamma=0.1)
    # NOTE(review): the scheduler is commented out; `gamma` is assigned but
    # its consumer is beyond this excerpt — confirm how it is used.
    gamma=0.1
Ejemplo n.º 7
0
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model selection: WRN-16-8 (dropout 0.4) for SVHN, WRN-28-10 (0.3) otherwise.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=0.3)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
# SGD with Nesterov momentum and 5e-4 weight decay.
cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.learning_rate,
                                momentum=0.9,
                                nesterov=True,
                                weight_decay=5e-4)
Ejemplo n.º 8
0
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model selection as a single if/elif chain.  The original used a bare
# second `if` for resnet10, which broke the chain (the wideresnet branch
# chained only onto the resnet10 test) — latent bug, now one chain.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'resnet10':
    cnn = ResNet10(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        # WRN-16-8 with dropout 0.4 for SVHN; WRN-28-10 (0.3) otherwise.
        cnn = WideResNet(depth=16, num_classes=num_classes, widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10,
                         dropRate=0.3)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
cnn_optimizer = torch.optim.SGD(cnn.parameters(), lr=args.learning_rate,
                                momentum=0.9, nesterov=True, weight_decay=5e-4)

# LR schedule: SVHN decays 10x at 80/120; otherwise 5x at 60/120/160.
if args.dataset == 'svhn':
    scheduler = MultiStepLR(cnn_optimizer, milestones=[80, 120], gamma=0.1)
else:
    scheduler = MultiStepLR(cnn_optimizer, milestones=[60, 120, 160], gamma=0.2)

log_dir = 'logs'
Ejemplo n.º 9
0
    cnn = ResNet50(num_classes=num_classes)
elif 'vgg' in args.model:
    cnn = vgg_dict[args.model](num_classes=num_classes)
elif 'Mobile' in args.model:
    cnn = mobile_half(num_classes=num_classes)
elif 'ShuffleV1' in args.model:
    cnn = ShuffleV1(num_classes=num_classes)
elif 'ShuffleV2' in args.model:
    cnn = ShuffleV2(num_classes=num_classes)
elif 'wrn' in args.model:
    cnn = wrn(depth=int(args.model[4:6]),
              widen_factor=int(args.model[-1:]),
              num_classes=num_classes)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28,
                     num_classes=num_classes,
                     widen_factor=10,
                     dropRate=0.3)
else:
    assert False

# Lightweight architectures get a smaller base LR (RepDistiller convention).
if 'Shuffle' in args.model or 'Mobile' in args.model:
    # args.lr = 0.02 ## ReviewKD
    args.lr = 0.01  ## to be consistent with RepDistiller

# Collect everything the optimizer should update in one ModuleList.
trainable_parameters = nn.ModuleList()
trainable_parameters.append(cnn)

criterion = nn.CrossEntropyLoss().cuda()
# KL-divergence distillation loss with temperature args.T.
kl_criterion = DistillKL(args.T)
wd = args.wd
lr = args.lr
Ejemplo n.º 10
0
elif args.model == 'resnet50':
    from resnet import resnet50
    cnn = resnet50(num_classes)
    print(cnn)
elif args.model == 'resnext50':
    from resnext import resnext50
    cnn = resnext50(num_classes)
    print(cnn)
elif args.model == 'densenet121':
    from densenet import densenet121
    cnn = densenet121(num_classes)
    print(cnn)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=0.3)
elif args.model == 'wideresnet101':
    cnn = torch.hub.load('pytorch/vision:v0.5.0',
                         'wide_resnet101_2',
                         pretrained=False,
                         num_classes=num_classes)

cnn = cnn.cuda()  # NOTE(review): redundant — the DataParallel wrap below calls .cuda() again
cnn = torch.nn.DataParallel(cnn).cuda()  # replicate across all visible GPUs
criterion = nn.CrossEntropyLoss().cuda()
Ejemplo n.º 11
0
elif 'resnet' in args.model:
    cnn = build_resnet_backbone(depth = int(args.model[6:]), num_classes=num_classes)
elif 'ResNet50' in args.model:
    cnn = ResNet50(num_classes=num_classes)
elif 'vgg' in args.model:
    cnn = build_vgg_backbone(depth = int(args.model[3:]), num_classes=num_classes)
elif 'mobile' in args.model:
    cnn = mobile_half(num_classes=num_classes)
elif 'shufflev1' in args.model:
    cnn = ShuffleV1(num_classes=num_classes)
elif 'shufflev2' in args.model:
    cnn = ShuffleV2(num_classes=num_classes)
elif 'wrn' in args.model:
    cnn = wrn(depth = int(args.model[4:6]), widen_factor = int(args.model[-1:]), num_classes=num_classes)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10,
                         dropRate=0.3)
else:
    assert False

# Lightweight architectures get a smaller base LR.
if 'shuffle' in args.model or 'mobile' in args.model:
    args.lr = 0.02

# Collect everything the optimizer should update in one ModuleList.
trainable_parameters = nn.ModuleList()
trainable_parameters.append(cnn)

criterion = nn.CrossEntropyLoss().cuda()
# KL-divergence distillation loss with temperature args.T.
kl_criterion = DistillKL(args.T)
wd = args.wd
lr = args.lr
cnn_optimizer = torch.optim.SGD(trainable_parameters.parameters(), lr=args.lr,
                                momentum=0.9, nesterov=True, weight_decay=wd)
Ejemplo n.º 12
0
def main(args):
    """Training driver with optional label/pixel noise injection and
    per-epoch example-removal strategies (weighted sampling, low-loss
    removal, noise removal, forgetting-event based removal), saving
    per-example statistics after every epoch.

    NOTE(review): relies on module-level names not visible in this chunk
    (`args_dict`, `get_data`, `noise_labels`, `noise_pixels`, `train`,
    `test`, `evaluate_on_removed`, `update_example_weights`,
    `compute_forgetting_statistics`, `sort_examples_by_forgetting`,
    `sample_dataset_by_forgetting`, `get_hms`) — confirm they exist.
    """
    # Enter all arguments that you want to be in the filename of the saved output
    ordered_args = [
        'dataset',
        'data_augmentation',
        'seed',
        'remove_percent',
        'burn_in_epochs',
        'remove_strategy',
        'noise_percent',
        'noise_labels',
        'noise_pixels_percent',
        'noise_pixels_std',
        'optimizer',
        'learning_rate',
    ]
    # Encode the run configuration into the output filename.
    save_fname = '__'.join('{}_{}'.format(arg, args_dict[arg])
                           for arg in ordered_args)
    fname = os.path.join(args.output_dir, save_fname)
    # Ask before re-running an experiment whose results already exist.
    if os.path.exists(fname + '__stats_dict.pkl'):
        redo = input(
            "There exists experiment result already, continue? [yes/no] ")
        if redo == 'no':
            exit()
        elif redo == 'yes':
            pass
        else:
            raise ValueError('wrong answer')

    os.makedirs(args.output_dir, exist_ok=True)

    # Set appropriate devices
    device = torch.device(args.device)
    print('run on device: {0}'.format(device))
    cudnn.benchmark = True  # Should make training go faster for large models

    # Set random seed for initialization
    torch.manual_seed(args.seed)
    if 'cuda' in args.device:
        torch.cuda.manual_seed(args.seed)
    npr.seed(args.seed)

    train_ds, test_ds, num_classes = get_data(args.dataset)

    # Optionally corrupt the training set with label noise OR pixel noise
    # (mutually exclusive, enforced by the assert).
    if args.noise_percent > 0:
        assert not (args.noise_labels and (args.noise_pixels_percent > 0))
        if args.noise_labels:
            train_ds, noise_indexes = noise_labels(train_ds,
                                                   args.noise_percent, fname)
        if args.noise_pixels_percent:
            train_ds, noise_indexes = noise_pixels(train_ds,
                                                   args.noise_percent,
                                                   args.noise_pixels_percent,
                                                   args.noise_pixels_std,
                                                   fname)

    print('Training on ' + str(len(train_ds)) + ' examples')

    # Setup model
    if args.model == 'resnet18':
        model = ResNet18(num_classes=num_classes)
    elif args.model == 'wideresnet':
        if args.dataset == 'svhn':
            model = WideResNet(depth=16,
                               num_classes=num_classes,
                               widen_factor=8,
                               dropRate=0.4)
        else:
            model = WideResNet(depth=28,
                               num_classes=num_classes,
                               widen_factor=10,
                               dropRate=0.3)
    elif args.model == 'cnn':
        model = CNN(num_classes=num_classes)
    else:
        # NOTE(review): only prints — `model` stays unbound and the .to()
        # below raises NameError; consider raising ValueError here instead.
        print(
            'Specified model not recognized. Options are: resnet18 and wideresnet'
        )

    # Setup loss
    model = model.to(args.device)
    criterion = torch.nn.CrossEntropyLoss().cuda()
    # HACK: re-runs __init__ with reduce=False (deprecated spelling of
    # reduction='none') so the loss is returned per example.
    criterion.__init__(reduce=False)

    # Setup optimizer
    if args.optimizer == 'adam':
        model_optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    elif args.optimizer == 'sgd':
        model_optimizer = torch.optim.SGD(model.parameters(),
                                          lr=args.learning_rate,
                                          momentum=0.9,
                                          nesterov=True,
                                          weight_decay=5e-4)
        # Only 'sgd' gets a schedule; see the scheduler.step() call below.
        scheduler = MultiStepLR(model_optimizer,
                                milestones=[60, 120, 160],
                                gamma=0.2)
    elif args.optimizer == 'sgd-const-lr':
        model_optimizer = torch.optim.SGD(model.parameters(),
                                          lr=args.learning_rate,
                                          momentum=0.9,
                                          nesterov=True,
                                          weight_decay=5e-4)
    else:
        print('Specified optimizer not recognized. Options are: adam and sgd')

    save_point = os.path.join(args.output_dir, 'checkpoint', args.dataset)
    os.makedirs(save_point, exist_ok=True)
    checkpoint_fname = os.path.join(save_point, save_fname + '.t7')

    # Initialize dictionary to save statistics for every example presentation
    example_stats = {}
    num_examples = len(train_ds)
    example_weights = np.ones(num_examples)

    elapsed_time = 0
    # train_idx = np.array(range(0, len(train_ds)))
    train_loader = DataLoader(train_ds,
                              batch_size=args.batch_size,
                              shuffle=True)
    for epoch in range(args.epochs):
        # After burn-in, rebuild the training subset per the removal strategy.
        if args.remove_strategy != 'normal' and epoch >= args.burn_in_epochs:
            if 'sampling' in args.remove_strategy:
                # sampling by weight
                normalized_weights = example_weights / example_weights.sum()
                index_stats = example_stats.get('example_weights', [[], []])
                index_stats[1].append(normalized_weights)
                example_stats['example_weights'] = index_stats

                choice_num = int(num_examples *
                                 (1 - args.remove_percent / 100))
                train_idx = np.random.choice(range(num_examples),
                                             size=choice_num,
                                             replace=False,
                                             p=normalized_weights)
            elif args.remove_strategy == 'low-acc':
                # Drop the examples with the lowest recorded loss from the
                # previous epoch.
                remove_n = int(args.remove_percent * num_examples / 100)
                losses = []
                for idx in range(num_examples):
                    losses.append(example_stats[idx][0][epoch - 1])
                losses = np.array(losses)
                sorted_indexes = np.argsort(losses)
                train_idx = sorted_indexes[:num_examples - remove_n]
            elif args.remove_strategy == 'all-noise':
                # Preferentially remove the injected-noise examples.
                remove_n = int(args.remove_percent * num_examples / 100)
                # NOTE(review): `noise_percent_labels` is not among the args
                # used above (`noise_percent`?) — confirm the attribute name.
                if args.remove_percent <= args.noise_percent_labels:
                    remove_indexes = npr.choice(noise_indexes,
                                                remove_n,
                                                replace=False)
                    train_idx = np.setdiff1d(range(num_examples),
                                             remove_indexes)
                else:
                    train_idx = np.setdiff1d(range(num_examples),
                                             noise_indexes)
                    train_idx = npr.choice(train_idx,
                                           num_examples - remove_n,
                                           replace=False)
            else:
                # event method
                _, unlearned_per_presentation, _, first_learned = compute_forgetting_statistics(
                    example_stats, epoch)
                ordered_examples, ordered_values = sort_examples_by_forgetting(
                    [unlearned_per_presentation], [first_learned], epoch)
                train_idx = sample_dataset_by_forgetting(
                    train_ds, ordered_examples, ordered_values,
                    args.remove_percent, args.remove_strategy)
            sampler = torch.utils.data.SubsetRandomSampler(train_idx)
            train_loader = DataLoader(train_ds,
                                      batch_size=args.batch_size,
                                      sampler=sampler)

        start_time = time.time()
        train(args, model, criterion, device, train_loader, model_optimizer,
              epoch, example_stats)

        test_loader = DataLoader(test_ds, batch_size=32, shuffle=True)
        test(epoch, model, criterion, device, test_loader, example_stats,
             checkpoint_fname)

        if args.remove_strategy != 'normal' and epoch >= args.burn_in_epochs:
            # evaluate on removed data
            removed_idx = np.setdiff1d(range(num_examples), train_idx)
            sampler = torch.utils.data.SubsetRandomSampler(removed_idx)
            removed_loader = DataLoader(train_ds,
                                        batch_size=args.batch_size,
                                        sampler=sampler)
            evaluate_on_removed(model, criterion, device, removed_loader,
                                epoch, example_stats)

        if 'sampling' in args.remove_strategy:
            example_weights = update_example_weights(example_weights,
                                                     example_stats, epoch,
                                                     args.remove_strategy)

        epoch_time = time.time() - start_time
        elapsed_time += epoch_time
        print('| Elapsed time : %d:%02d:%02d' % (get_hms(elapsed_time)))

        # Update optimizer step
        if args.optimizer == 'sgd':
            scheduler.step(epoch)

        # Save the stats dictionary
        fname = os.path.join(args.output_dir, save_fname)
        with open(fname + "__stats_dict.pkl", "wb") as f:
            pickle.dump(example_stats, f)

        # Log the best train and test accuracy so far
        with open(fname + "__best_acc.txt", "w") as f:
            f.write('train test \n')
            f.write(str(max(example_stats['train'][1])))
            f.write(' ')
            f.write(str(max(example_stats['test'][1])))
Ejemplo n.º 13
0
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model selection: WRN-16-8 (dropout 0.4) for SVHN, WRN-28-10 (0.3) otherwise.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=0.3)

# Single-GPU variant; the multi-GPU wrap is kept commented for reference.
#cnn = torch.nn.DataParallel(cnn).cuda()
cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.learning_rate,
                                momentum=0.9,
                                nesterov=True,
                                weight_decay=5e-4)
Ejemplo n.º 14
0
# Training loader: shuffled; pinned memory speeds host->GPU copies.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True,
                                           num_workers=2)

# Evaluation loader: fixed order.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# Model selection (no SVHN special case in this variant).
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28, num_classes=num_classes, widen_factor=10,
                     dropRate=0.3)
elif args.model == 'resnet20':
    cnn = resnet20(num_classes=num_classes)
elif args.model == 'vgg16':
    cnn = vgg16_bn(num_classes=num_classes)


# Wrap model if using input dropout.
if args.input_dropout:
    print('Wrapping model with input dropout.')
    cnn = augmentations.ModelWithInputDropout(
        cnn,
        args.keep_prob,
        num_samples=args.num_samples,
    )
Ejemplo n.º 15
0
                                           batch_size=args.batch_size,
                                           pin_memory=True,
                                           num_workers=2,
                                           sampler=get_sampler_classifier(train_loader, args.seed, args.prop))


# Evaluation loader: fixed order, pinned memory for faster transfers.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=args.batch_size,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# This variant threads num_channels through both models; WRN uses 0.5 dropout.
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes, num_channels=num_channels)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28, num_classes=num_classes, num_channels=num_channels, widen_factor=10, dropRate=0.5)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
cnn_optimizer = torch.optim.SGD(cnn.parameters(), lr=args.learning_rate,
                                momentum=0.9, nesterov=True, weight_decay=5e-4)

# Decay LR 10x at 40% and 80% of the total epoch budget.
scheduler = MultiStepLR(cnn_optimizer, milestones=[round(0.4*args.epochs), round(0.8*args.epochs)], gamma=0.1)

def test(loader):
    cnn.eval()    # Change model to 'eval' mode (BN uses moving mean/var).
    correct = 0.
    total = 0.
    for images, labels in loader:
        images = images.cuda()
        labels = labels.cuda()
Ejemplo n.º 16
0
                                                num_workers=2)

# NOTE(review): no batch_size given here — DataLoader defaults to 1 sample
# per batch; confirm this is intended for evaluation.
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          shuffle=False,
                                          pin_memory=True,
                                          num_workers=2)

# ---------------------------------------------------------------------------------------
# Choose the Model
# ---------------------------------------------------------------------------------------
print("Configuring  Model {}".format('*' * 80))
if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'wideresnet':
    cnn = WideResNet(depth=28,
                     num_classes=num_classes,
                     widen_factor=10,
                     dropRate=0.3)
elif args.model == 'densenet':
    # DenseNet-BC disabled (bottleneck=False), growth rate 12, no dropout.
    cnn = DenseNet3(depth=100,
                    num_classes=num_classes,
                    growth_rate=12,
                    bottleneck=False,
                    dropRate=0)

cnn = cnn.cuda()
criterion = nn.CrossEntropyLoss().cuda()
# SGD with Nesterov momentum and 5e-4 weight decay.
cnn_optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.learning_rate,
                                momentum=0.9,
                                nesterov=True,
                                weight_decay=5e-4)