Example #1

    # model = model.eval()

    mean, std = get_mean_std(args.dataset)
    pad = int((row.padded_im_size - row.im_size) / 2)
    transform = transforms.Compose([
        transforms.Pad(pad),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    if args.dataset in ['MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100']:
        full_dataset = getattr(datasets, args.dataset)
        subset_dataset = get_subset_dataset(
            full_dataset=full_dataset,
            examples_per_class=args.examples_per_class,
            epc_seed=row.epc_seed,
            root=osp.join(args.dataset_root, args.dataset),
            train=True,
            transform=transform,
            download=True)
    elif args.dataset == 'STL10':
        full_dataset = datasets.STL10
        subset_dataset = get_subset_dataset(
            full_dataset=full_dataset,
            examples_per_class=args.examples_per_class,
            epc_seed=row.epc_seed,
            root=osp.join(args.dataset_root, args.dataset),
            split='train',
            transform=transform,
            download=False)
    else:
        raise Exception('Unknown dataset: {}'.format(args.dataset))
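
For context: get_subset_dataset is a helper defined elsewhere in the project; this page only shows call sites. A minimal sketch of what it plausibly does, inferred from those call sites (the class-balanced sampling and keyword forwarding are assumptions, not the project's actual code):

import numpy as np
from torch.utils.data import Subset

def get_subset_dataset(full_dataset=None, examples_per_class=None,
                       epc_seed=0, root=None, transform=None,
                       download=True, **split_kwargs):
    # Instantiate the underlying torchvision dataset; **split_kwargs forwards
    # either train=True (MNIST-style) or split='train' (STL10-style).
    dataset = full_dataset(root=root, transform=transform,
                           download=download, **split_kwargs)

    # torchvision exposes labels as .targets (MNIST, CIFAR) or .labels (STL10, SVHN)
    labels = np.asarray(getattr(dataset, 'targets',
                                getattr(dataset, 'labels', None)))

    # Draw a reproducible, class-balanced subset: examples_per_class
    # indices per class, seeded with the epc_seed used throughout this page.
    rng = np.random.RandomState(epc_seed)
    keep = []
    for c in np.unique(labels):
        class_idx = np.where(labels == c)[0]
        keep.extend(rng.choice(class_idx, size=examples_per_class,
                               replace=False))
    return Subset(dataset, sorted(keep))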
Example #2
# Transform
mean, std = get_mean_std(dataset)
pad = int((row.padded_im_size - row.im_size) / 2)
transform = transforms.Compose([
    transforms.Pad(pad),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])

# Subsampled dataset
full_dataset = getattr(datasets, dataset)
subset_dataset = get_subset_dataset(full_dataset=full_dataset,
                                    examples_per_class=examples_per_class,
                                    epc_seed=row.epc_seed,
                                    root='data/',
                                    train=True,
                                    transform=transform,
                                    download=True)

# Loader
loader = DataLoader(dataset=subset_dataset,
                    drop_last=False,
                    batch_size=batch_size)

# In[6]:

#%% G decomposition

C = row.num_classes
Example #3
    else:
        train_transform = transforms.Compose([
            transforms.Pad((config.padded_im_size - config.im_size) // 2),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    test_transform = transforms.Compose([
        transforms.Pad((config.padded_im_size - config.im_size) // 2),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    full_dataset = getattr(datasets, args.dataset)
    subset_dataset = get_subset_dataset(full_dataset=full_dataset,
                                        examples_per_class=40,
                                        epc_seed=config.epc_seed,
                                        root=osp.join(args.dataset_root,
                                                      args.dataset),
                                        train=True,
                                        transform=test_transform,
                                        download=True)

    loader = DataLoader(dataset=subset_dataset,
                        drop_last=False,
                        batch_size=args.batch_size)

    if args.model in [
            'VGG11_bn', 'ResNet18', 'DenseNet3_40', 'LeNet', 'MobileNet'
    ]:
        model = Network().construct(args.model, config)
    else:
        raise Exception('Unknown model: {}'.format(args.model))
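
Network().construct(args.model, config) refers to the project's model factory, which this page does not show. A rough sketch of the pattern, with torchvision constructors standing in for the project's own architectures (the mapping and the num_classes plumbing are assumptions):

import torchvision.models as tv_models

class Network:
    # Hypothetical stand-in for the project's model factory.
    def construct(self, name, config):
        constructors = {
            'ResNet18': lambda: tv_models.resnet18(num_classes=config.num_classes),
            'VGG11_bn': lambda: tv_models.vgg11_bn(num_classes=config.num_classes),
        }
        if name not in constructors:
            raise Exception('Unknown model: {}'.format(name))
        return constructors[name]()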
Example #4
gridLabels = np.concatenate((trainingLabels, validationLabels))
indexValid = list(
    range(len(trainingLabels),
          len(trainingLabels) + len(validationLabels)))

# Train the classifiers on training sets and save the data in a specific folder

accuracy = []
indexName = []

# ========== Baseline Classifier ==========
print("baseline training...")
nameClassifier = "BaselineClassifier"
baseClassifier = baselineClassifier.BaselineClassifier()
trainedClassifier = baseClassifier.get_overlaps(
    utils.get_subset_dataset(dataset, trainingClaimSet + validationClaimSet))
thresholds = baseClassifier.calculate_classifier_thresholds(trainedClassifier)
# get the predicted labels
testingData = baseClassifier.get_overlaps(
    utils.get_subset_dataset(dataset, testingClaimSet))
#print(testingData)
#print(thresholds)
predLabels = baseClassifier.predict(thresholds, testingData)

# Get the metrics and save them
acc = utils.getAllMetricsAndSave(nameClassifier, predLabels, testingLabels)

# Accuracy
accuracy.append(acc)
indexName.append(nameClassifier)
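
utils.getAllMetricsAndSave is likewise project-specific. A minimal sketch consistent with how it is used above, returning accuracy and writing a per-classifier report to disk, with scikit-learn standing in for the metric computation (the save format and output folder are assumptions):

import os
from sklearn.metrics import accuracy_score, classification_report

def getAllMetricsAndSave(name, pred_labels, true_labels, out_dir='results'):
    # Compute accuracy plus a per-class report and persist them,
    # one text file per classifier.
    acc = accuracy_score(true_labels, pred_labels)
    report = classification_report(true_labels, pred_labels)
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, name + '.txt'), 'w') as f:
        f.write('accuracy: {}\n\n{}'.format(acc, report))
    return acc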
Example #5

    logger_file_handler = logging.FileHandler(logging_file)
    logger.addHandler(logger_file_handler)
    logger.info('Arguments: {}'.format(args))

    mean, std = get_mean_std(args.dataset)
    transform = transforms.Compose([
        transforms.Pad(0),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    full_dataset = getattr(datasets, args.dataset)
    train_data = get_subset_dataset(full_dataset=full_dataset,
                                    examples_per_class=args.examples_per_class,
                                    epc_seed=args.epc_seed,
                                    root=osp.join(args.dataset_root,
                                                  args.dataset),
                                    train=True,
                                    transform=transform,
                                    download=True)

    test_data = get_subset_dataset(full_dataset=full_dataset,
                                   examples_per_class=args.examples_per_class,
                                   epc_seed=args.epc_seed,
                                   root=osp.join(args.dataset_root,
                                                 args.dataset),
                                   train=False,
                                   transform=transform,
                                   download=True)
    dataloaders = dict()
    dataloaders['train'] = data.DataLoader(train_data,
                                           batch_size=args.batch_size,
                                           shuffle=True)  # the example is truncated here; shuffle=True is an assumed completion
Example #6
def train(model,
          optimizer,
          scheduler,
          dataloaders,
          criterion,
          device,
          num_epochs=100,
          args=None,
          dataset_sizes={
              'train': 5e4,
              'test': 1e4
          },
          images_dir=None,
          ckpt_dir=None):

    logger = logging.getLogger('train')
    loss_list = {'train': list(), 'test': list()}
    acc_list = {'train': list(), 'test': list()}

    assert images_dir is not None
    assert ckpt_dir is not None

    loss_image_path = osp.join(images_dir, 'loss.png')
    acc_image_path = osp.join(images_dir, 'acc.png')

    model.train()

    full_eigenspectrums = list()
    epoch_eigenspectrums = list()

    full_eigenspectrums_path = osp.join(ckpt_dir,
                                        'training_eigenspectrum_full.npy')
    C = config.num_classes  # note: config is not a parameter of train(); it is assumed to be module-level
    valid_layers = get_valid_layers(model)
    for epoch in range(num_epochs):
        logger.info('epoch: %d' % epoch)
        with torch.enable_grad():
            for batch, truth in dataloaders['train']:

                batch = batch.to(device)
                truth = truth.to(device)
                optimizer.zero_grad()

                output = model(batch)
                loss = criterion(output, truth)

                loss.backward()
                optimizer.step()

        scheduler.step()

        # parameter updates for this epoch are finished; recompute the spectra below

        mean, std = get_mean_std(args.dataset)
        pad = int((config.padded_im_size - config.im_size) / 2)
        transform = transforms.Compose([
            transforms.Pad(pad),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
        if args.dataset in ['MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100']:
            full_dataset = getattr(datasets, args.dataset)
            subset_dataset = get_subset_dataset(
                full_dataset=full_dataset,
                examples_per_class=args.examples_per_class,
                epc_seed=config.epc_seed,
                root=osp.join(args.dataset_root, args.dataset),
                train=True,
                transform=transform,
                download=True)
        elif args.dataset in ['STL10', 'SVHN']:
            full_dataset = getattr(datasets, args.dataset)
            subset_dataset = get_subset_dataset(
                full_dataset=full_dataset,
                examples_per_class=args.examples_per_class,
                epc_seed=config.epc_seed,
                root=osp.join(args.dataset_root, args.dataset),
                split='train',
                transform=transform,
                download=True)
        else:
            raise Exception('Unknown dataset: {}'.format(args.dataset))

        loader = data.DataLoader(dataset=subset_dataset,
                                 drop_last=False,
                                 batch_size=args.batch_size)

        Hess = FullHessian(crit='CrossEntropyLoss',
                           loader=loader,
                           device=device,
                           model=model,
                           num_classes=C,
                           hessian_type='Hessian',
                           init_poly_deg=64,
                           poly_deg=128,
                           spectrum_margin=0.05,
                           poly_points=1024,
                           SSI_iters=128)

        Hess_eigval, \
        Hess_eigval_density = Hess.LanczosLoop(denormalize=True)

        full_eigenspectrums.append(Hess_eigval)
        full_eigenspectrums.append(Hess_eigval_density)

        for layer_name, _ in model.named_parameters():
            if layer_name not in valid_layers:
                continue

            Hess = LayerHessian(crit='CrossEntropyLoss',
                                loader=loader,
                                device=device,
                                model=model,
                                num_classes=C,
                                layer_name=layer_name,
                                hessian_type='Hessian',
                                init_poly_deg=64,
                                poly_deg=128,
                                spectrum_margin=0.05,
                                poly_points=1024,
                                SSI_iters=128)

            Hess_eigval, \
            Hess_eigval_density = Hess.LanczosLoop(denormalize=True)

            layerwise_eigenspectrums_path = osp.join(
                ckpt_dir,
                'training_eigenspectrums_epoch_{}_layer_{}.npz'.format(
                    epoch, layer_name))
            np.savez(layerwise_eigenspectrums_path,
                     eigval=Hess_eigval,
                     eigval_density=Hess_eigval_density)

        for phase in ['train', 'test']:

            stats = evaluate_model(model, criterion, dataloaders[phase],
                                   device, dataset_sizes[phase])

            loss_list[phase].append(stats['loss'])
            acc_list[phase].append(stats['acc'])

            logger.info('{}:'.format(phase))
            logger.info('\tloss:{}'.format(stats['loss']))
            logger.info('\tacc :{}'.format(stats['acc']))

            if phase == 'test':
                plt.clf()
                plt.plot(loss_list['test'], label='test_loss')
                plt.plot(loss_list['train'], label='train_loss')
                plt.legend()
                plt.savefig(loss_image_path)

                plt.clf()
                plt.plot(acc_list['test'], label='test_acc')
                plt.plot(acc_list['train'], label='train_acc')
                plt.legend()
                plt.savefig(acc_image_path)
                plt.clf()

    full_eigenspectrums = np.array(full_eigenspectrums)
    assert full_eigenspectrums.shape[0] % 2 == 0
    assert full_eigenspectrums.shape[0] // 2 == num_epochs
    np.save(full_eigenspectrums_path, full_eigenspectrums)
    return full_eigenspectrums
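
Two more helpers used above are external to this snippet. First, evaluate_model: a minimal sketch consistent with how the phase loop calls it, returning the 'loss' and 'acc' keys that loop reads (the exact accounting is an assumption):

import torch

def evaluate_model(model, criterion, loader, device, dataset_size):
    # Accumulate summed loss and correct predictions over the loader,
    # then normalize by the dataset size.
    model.eval()
    total_loss, total_correct = 0.0, 0
    with torch.no_grad():
        for batch, truth in loader:
            batch, truth = batch.to(device), truth.to(device)
            output = model(batch)
            total_loss += criterion(output, truth).item() * batch.size(0)
            total_correct += (output.argmax(dim=1) == truth).sum().item()
    model.train()
    return {'loss': total_loss / dataset_size,
            'acc': total_correct / dataset_size}

Second, the FullHessian / LayerHessian classes: whatever their internals, a Lanczos loop over the Hessian only needs Hessian-vector products, and double backpropagation supplies those without materializing the Hessian. A sketch of that primitive (the standard trick, not the library's actual code):

import torch

def hessian_vector_product(loss, params, vec):
    # Differentiate <grad(loss), vec> w.r.t. the parameters,
    # which yields H @ vec via double backprop.
    grads = torch.autograd.grad(loss, params, create_graph=True)
    dot = sum((g * v).sum() for g, v in zip(grads, vec))
    return torch.autograd.grad(dot, params)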