Example #1
0
def main():
    """Train a MobileNetV2-like network on binarised CIFAR-10 and save it.

    SGD with weight decay drives training for 80 epochs under a three-step
    learning-rate schedule; the test split is monitored during training
    instead of a held-out validation set.
    """
    data = CIFAR10(
        binary=True, validation_split=0.0)  # not using validation for anything
    net = mobilenet_v2_like(data.input_shape, data.num_classes)

    net.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                optimizer=SGDW(lr=0.01, momentum=0.9, weight_decay=1e-5),
                metrics=['accuracy'])
    net.summary()

    examples_per_batch = 128

    train_pipe = (data.train_dataset()
                  .shuffle(8 * examples_per_batch)
                  .batch(examples_per_batch)
                  .prefetch(tf.data.experimental.AUTOTUNE))
    eval_pipe = (data.test_dataset()
                 .batch(examples_per_batch)
                 .prefetch(tf.data.experimental.AUTOTUNE))

    def step_lr(epoch):
        # 0.01 for epochs 0-34, 0.005 for 35-64, 0.001 afterwards.
        if 0 <= epoch < 35:
            return 0.01
        return 0.005 if 35 <= epoch < 65 else 0.001

    net.fit(train_pipe,
            validation_data=eval_pipe,
            epochs=80,
            callbacks=[LearningRateScheduler(step_lr)])
    net.save("cnn-cifar10-binary.h5")
Example #2
0
def load_data(isTrain, batch_size=64, shuffle=False):
    """Return a DataLoader over CIFAR-10 read from ./cifar-10-batches-py.

    Images are converted to tensors and normalised to [-1, 1] per channel.
    `isTrain` selects the train or test split.
    """
    preprocessing = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5],
                             [0.5, 0.5, 0.5])
    ])
    cifar = CIFAR10('./cifar-10-batches-py',
                    train=isTrain,
                    transform=preprocessing)
    return DataLoader(dataset=cifar,
                      batch_size=batch_size,
                      shuffle=shuffle)
Example #3
0
def getClassWiseDataset(class_num, root, train, download, fillcolor, resample,
                        transform_pre, transform):
    """Return the rotation-bucket CIFAR-10 dataset, or one class's samples.

    When class_num == -1 the dataset object itself is returned; otherwise a
    list of the samples whose last tuple element (the label) == class_num.
    Reads the module-level `opt.rot_bucket_width`.
    """
    cifar10_data = CIFAR10(root=root,
                           download=download,
                           train=train,
                           fillcolor=fillcolor,
                           resample=resample,
                           transform_pre=transform_pre,
                           transform=transform,
                           rot_bucket_width=opt.rot_bucket_width)
    if class_num == -1:
        return cifar10_data
    # Fetch each sample exactly once.  The original indexed the dataset twice
    # per i, running the (possibly random) transforms twice — so the returned
    # sample was not even the one whose label had been tested.
    samples = (cifar10_data[i] for i in range(len(cifar10_data)))
    return [sample for sample in samples if sample[-1] == class_num]
Example #4
0
    def get_loaders(self, stage: str, **kwargs):
        """Build the train/valid DataLoaders for *stage*.

        stage1: CIFAR-10 under $DATA_PATH/data_cifar (downloaded if absent).
        stage2: cats-vs-dogs images globbed from $DATA_PATH/data_cat_dogs,
        labelled by cat_dog_labeling.json.

        Returns a dict mapping "train"/"valid" to their loader.
        """
        loaders = dict()
        data_params = dict(self.stages_config[stage]["data_params"])
        data_path = Path(os.environ["DATA_PATH"])

        if stage == "stage1":
            for mode in ["train", "valid"]:
                dataset = CIFAR10(
                    root=(data_path / "data_cifar").as_posix(),
                    train=(mode == "train"),
                    download=True,
                    transform=self.get_transforms(stage=stage, dataset=mode),
                )
                loaders[mode] = utils.get_loader(
                    dataset,
                    open_fn=lambda x: x,
                    dict_transform=lambda x: x,
                    shuffle=(mode == "train"),
                    sampler=None,
                    drop_last=(mode == "train"),
                    **data_params,
                )
        elif stage == "stage2":
            # Bug fix: build the tag-file path while data_path is still a
            # pathlib.Path.  The original rebound data_path to a str first,
            # so `data_path / "cat_dog_labeling.json"` raised TypeError.
            tag_file_path = (data_path / "cat_dog_labeling.json").as_posix()
            data_path = (data_path / "data_cat_dogs").as_posix() + "/*"
            train_data, valid_data, num_classes = get_cat_dogs_dataset(
                data_path, tag_file_path=tag_file_path)

            open_fn = get_reader(num_classes)
            data = [("train", train_data), ("valid", valid_data)]
            for mode, part in data:
                data_transform = self.get_transforms(stage=stage, dataset=mode)
                loaders[mode] = utils.get_loader(
                    part,
                    open_fn=open_fn,
                    dict_transform=data_transform,
                    shuffle=(mode == "train"),
                    sampler=None,
                    drop_last=(mode == "train"),
                    **data_params,
                )

        return loaders
Example #5
0
def getClassWiseDataset(class_num, root, shift, scale, fillcolor, train,
                        download, resample, matrix_transform, transform_pre,
                        transform):
    """Return the affine-transform CIFAR-10 dataset, or one class's samples.

    When class_num == -1 the dataset object itself is returned; otherwise a
    list of the samples whose last tuple element (the label) == class_num.
    """
    cifar10_data = CIFAR10(root=root,
                           shift=shift,
                           scale=scale,
                           fillcolor=fillcolor,
                           download=download,
                           train=train,
                           resample=resample,
                           matrix_transform=matrix_transform,
                           transform_pre=transform_pre,
                           transform=transform)
    if class_num == -1:  # for complete CIFAR10 dataset
        return cifar10_data
    # Class-wise dataset.  Fetch each sample exactly once: the original
    # indexed the dataset twice per i, running the (possibly random)
    # transforms twice, so the returned sample differed from the tested one.
    samples = (cifar10_data[i] for i in range(len(cifar10_data)))
    return [sample for sample in samples if sample[-1] == class_num]
Example #6
0
	print("Random Seed: ", MANUALSEED)
	random.seed(MANUALSEED)
	torch.manual_seed(MANUALSEED)

	cudnn.benchmark = True

	if torch.cuda.is_available() and not CUDA:
		print("WARNING: You have a CUDA device, so you should probably run with CUDA")

	train_dataset = CIFAR10(root=DATAROOT, shift=SHIFT, scale=(SHRINK, ENLARGE), fillcolor=(128, 128, 128), download=True, resample=PIL.Image.BILINEAR,
					matrix_transform=transforms.Compose([
							transforms.Normalize((0., 0., 16., 0., 0., 16., 0., 0.), (1., 1., 20., 1., 1., 20., 0.015, 0.015))
						]),
					transform_pre=transforms.Compose([
							transforms.RandomCrop(32, padding=4),
							transforms.RandomHorizontalFlip(),
						]),
					transform=transforms.Compose([
							transforms.ToTensor(),
							transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
						])
					)

	test_dataset = CIFAR10(root=DATAROOT, shift=(SHRINK, ENLARGE), fillcolor=(128, 128, 128), download=True, train=False, resample=PIL.Image.BILINEAR, 
				   matrix_transform=transforms.Compose([
				   		transforms.Normalize((0., 0., 16., 0., 0., 16., 0., 0.), (1., 1., 20., 1., 1., 20., 0.015, 0.015)),
				   	]),
				   transform_pre=transforms.Compose([
				   		transforms.RandomCrop(32, padding=4),
				   		transforms.RandomHorizontalFlip(),
				   	]),
Example #7
0
from cnn import CnnSearchSpace
from search_algorithms import AgingEvoSearch


def lr_schedule(epoch):
    """Piecewise-constant learning rate: 0.01 for epochs 0-89, 0.005 for
    epochs 90-104, and 0.001 otherwise (including negative epochs)."""
    for low, high, rate in ((0, 90, 0.01), (90, 105, 0.005)):
        if low <= epoch < high:
            return rate
    return 0.001


search_algorithm = AgingEvoSearch

# Full (10-class) CIFAR-10 training recipe: SGDW at the step schedule defined
# by lr_schedule above, 130 epochs, batch size 128.
training_config = TrainingConfig(
    dataset=CIFAR10(),
    optimizer=lambda: tfa.optimizers.SGDW(
        learning_rate=0.01, momentum=0.9, weight_decay=1e-5),
    batch_size=128,
    epochs=130,
    callbacks=lambda: [LearningRateScheduler(lr_schedule)],
)

# Aging-evolution search over the CNN search space; progress is checkpointed.
search_config = AgingEvoConfig(search_space=CnnSearchSpace(dropout=0.15),
                               rounds=6000,
                               checkpoint_dir="artifacts/cnn_cifar10")

# Resource bounds a candidate model must satisfy during the search.
bound_config = BoundConfig(error_bound=0.18,
                           peak_mem_bound=75000,
                           model_size_bound=75000,
                           mac_bound=30000000)
# NOTE(review): this snippet is Python 2 (print statements, theano) and will
# not run under Python 3 without converting the prints to print() calls.
# Echo the hyper-parameters configured earlier in the script.
print "hid_layer_sizes =", hid_layer_sizes
print "batchsize =", batchsize
print "zae_threshold =", zae_threshold
print "momentum =", momentum
print "pretrain, zae:       lr = %f, epc = %d" % (pretrain_lr_zae,
                                                  pretrain_epc)
print "pretrain, lin:       lr = %f, epc = %d, wd = %.3f" % (
    pretrain_lr_lin, pretrain_epc, weightdecay)
print "logistic regression: lr = %f, epc = %d" % (logreg_lr, logreg_epc)
print "finetune:            lr = %f, epc = %d" % (finetune_lr, finetune_epc)

#############
# LOAD DATA #
#############

# Load the raw CIFAR-10 train/test splits.
cifar10_data = CIFAR10()
train_x, train_y = cifar10_data.get_train_set()
test_x, test_y = cifar10_data.get_test_set()

print "\n... pre-processing"
# Mean subtraction/normalisation, then PCA with whitening, chained into one
# preprocessing pipeline compiled as a theano function.
preprocess_model = SubtractMeanAndNormalizeH(train_x.shape[1])
map_fun = theano.function([preprocess_model.varin], preprocess_model.output())

pca_obj = PCA()
pca_obj.fit(map_fun(train_x), retain=pca_retain, whiten=True)
preprocess_model = preprocess_model + pca_obj.forward_layer
preprocess_function = theano.function([preprocess_model.varin],
                                      preprocess_model.output())
# Apply the preprocessing fitted on the training split to both splits.
train_x = preprocess_function(train_x)
test_x = preprocess_function(test_x)
Example #9
0
def runAllExperiments():
    """Run the one-class OOD experiment for every CIFAR-10 class in turn.

    For each class i: load the pretrained per-class network from
    saved_models/class<i>.pth, build an in-distribution dataset (class i only)
    and an OOD dataset (the other nine classes), evaluate via checkOOD_CEL(),
    and record the average ROC and its std.  All results are written to
    all_results_valsize_<opt.l>.txt.  Relies on the module-level `opt` and
    `device`; checkOOD_CEL presumably reads the globals rebound below.
    """
    global in_train_dataloader, in_test_dataloader, out_test_dataloader, ood_test_dataset, in_test_dataset, in_train_dataset, net
    all_results = []
    for i in range(10):
        # Point opt at the current in-distribution class and its checkpoint.
        opt.indist_class = i
        opt.net = "saved_models/class{}.pth".format(i)
        opt.ood_dataset = "cifar_non{}_class".format(i)
        print(opt)

        net = wrn_regressor().to(device)  # wide resenet

        if opt.cuda:
            net = torch.nn.DataParallel(net, device_ids=[int(opt.gpu)])

        if opt.net != '':
            net.load_state_dict(torch.load(opt.net))

        # OOD label set = every class except the in-distribution one.
        cifar10_ood_classes = []
        for j in range(10):
            if j != opt.indist_class:
                cifar10_ood_classes.append(j)

        # Test-split OOD dataset: no pre-transform, per-channel normalisation.
        ood_test_dataset = CIFAR10(root=opt.dataroot,
                                   fillcolor=(128, 128, 128),
                                   download=True,
                                   resample=PIL.Image.BILINEAR,
                                   train=False,
                                   transform_pre=None,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           (0.4914, 0.4822, 0.4465),
                                           (0.2023, 0.1994, 0.2010)),
                                   ]),
                                   class_list=cifar10_ood_classes,
                                   rot_bucket_width=opt.rot_bucket_width)

        # Test-split in-distribution dataset (the single class under test).
        in_test_dataset = CIFAR10(root=opt.dataroot,
                                  fillcolor=(128, 128, 128),
                                  download=True,
                                  resample=PIL.Image.BILINEAR,
                                  train=False,
                                  transform_pre=None,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize(
                                          (0.4914, 0.4822, 0.4465),
                                          (0.2023, 0.1994, 0.2010)),
                                  ]),
                                  class_list=[opt.indist_class],
                                  rot_bucket_width=opt.rot_bucket_width)

        # Train-split in-distribution dataset, with crop/flip augmentation.
        in_train_dataset = CIFAR10(root=opt.dataroot,
                                   fillcolor=(128, 128, 128),
                                   download=True,
                                   resample=PIL.Image.BILINEAR,
                                   train=True,
                                   transform_pre=transforms.Compose([
                                       transforms.RandomCrop(32, padding=4),
                                       transforms.RandomHorizontalFlip(),
                                   ]),
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize(
                                           (0.4914, 0.4822, 0.4465),
                                           (0.2023, 0.1994, 0.2010)),
                                   ]),
                                   class_list=[opt.indist_class],
                                   rot_bucket_width=opt.rot_bucket_width)

        # NOTE(review): all three loaders are unshuffled — presumably for a
        # stable evaluation order.
        in_train_dataloader = torch.utils.data.DataLoader(
            in_train_dataset,
            batch_size=opt.batchSize,
            shuffle=False,
            num_workers=int(opt.workers))

        in_test_dataloader = torch.utils.data.DataLoader(
            in_test_dataset,
            batch_size=opt.batchSize,
            shuffle=False,
            num_workers=int(opt.workers))

        out_test_dataloader = torch.utils.data.DataLoader(
            ood_test_dataset,
            batch_size=opt.batchSize,
            shuffle=False,
            num_workers=int(opt.workers))

        # Evaluate with the globals prepared above and collect the scores.
        avr_roc, std_roc = checkOOD_CEL()
        print("Average ROC and std for class {}: [{}] [{}]".format(
            i, avr_roc, std_roc))

        all_results.append("{} {}".format(avr_roc, std_roc))

    with open("all_results_valsize_{}.txt".format(opt.l), 'w') as f:
        f.write("\n".join(all_results))
Example #10
0
    if opt.net != '':
        net.load_state_dict(torch.load(opt.net))

    cifar10_ood_classes = []
    for i in range(10):
        if i != opt.indist_class:
            cifar10_ood_classes.append(i)

    ood_test_dataset = CIFAR10(root=opt.dataroot,
                               fillcolor=(128, 128, 128),
                               download=True,
                               resample=PIL.Image.BILINEAR,
                               train=False,
                               transform_pre=None,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize(
                                       (0.4914, 0.4822, 0.4465),
                                       (0.2023, 0.1994, 0.2010)),
                               ]),
                               class_list=cifar10_ood_classes,
                               rot_bucket_width=opt.rot_bucket_width)

    in_test_dataset = CIFAR10(root=opt.dataroot,
                              fillcolor=(128, 128, 128),
                              download=True,
                              resample=PIL.Image.BILINEAR,
                              train=False,
                              transform_pre=None,
                              transform=transforms.Compose([
                                  transforms.ToTensor(),
Example #11
0
cudnn.benchmark = True

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

# Training split of the project's affine-transform CIFAR-10 variant.
# degrees/translate/scale/shear come from the CLI options; images get
# standard crop/flip augmentation followed by per-channel normalisation.
# NOTE(review): CIFAR10 here is a project-local class, not torchvision's —
# the affine keyword arguments only exist on the custom implementation.
train_dataset = CIFAR10(root=opt.dataroot,
                        degrees=opt.rot,
                        translate=(opt.translate, opt.translate),
                        scale=(opt.shrink, opt.enlarge),
                        shear=opt.shear,
                        fillcolor=(128, 128, 128),
                        download=True,
                        resample=PIL.Image.BILINEAR,
                        transform_pre=transforms.Compose([
                            transforms.RandomCrop(32, padding=4),
                            transforms.RandomHorizontalFlip(),
                        ]),
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                 (0.2023, 0.1994, 0.2010)),
                        ]))

assert train_dataset
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))
Example #12
0
                                nesterov=args.nesterov,
                                weight_decay=args.weight_decay)
    net.to('cuda')
    # Wrap in DataParallel when more than one GPU is visible.
    if torch.cuda.device_count() > 1:
        net = torch.nn.DataParallel(net)
    cudnn.benchmark = True
    criterion = nn.CrossEntropyLoss().cuda()
    # trainer
    # Select the trainer class from the (adversarial, regularisation) flags;
    # only the 'no' and 'random-svd' regularisation settings are supported.
    if args.adversarial:
        if args.regu == 'no':
            trainer = AdversarialTrainer(net, criterion, optimizer, args)
        elif args.regu == 'random-svd':
            trainer = AdversarialOrthReguTrainer(net, criterion, optimizer,
                                                 args)
        else:
            raise Exception('Invalid setting for adversarial training')
    else:
        if args.regu == 'no':
            trainer = Trainer(net, criterion, optimizer, args)
        elif args.regu == 'random-svd':
            trainer = OrthReguTrainer(net, criterion, optimizer, args)
        else:
            raise Exception('Invalid regularization term')
    # data
    # Dataset choice: CIFAR-100 when requested, CIFAR-10 otherwise.
    if args.dataset == 'cifar100':
        data = CIFAR100(root=args.data, batch_size=args.batch_size)
    else:
        data = CIFAR10(root=args.data, batch_size=args.batch_size)
    # start
    best_acc = trainer.run(data, args.epochs)
Example #13
0
if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

# Training split of the project's affine-transform CIFAR-10 variant.
# NOTE(review): matrix_transform normalises an 8-value target (presumably the
# affine matrix parameters) — confirm against the custom CIFAR10 class; the
# image itself gets crop/flip augmentation and per-channel normalisation.
train_dataset = CIFAR10(root=opt.dataroot,
                        shift=opt.shift,
                        scale=(opt.shrink, opt.enlarge),
                        fillcolor=(128, 128, 128),
                        download=True,
                        resample=PIL.Image.BILINEAR,
                        matrix_transform=transforms.Compose([
                            transforms.Normalize(
                                (0., 0., 16., 0., 0., 16., 0., 0.),
                                (1., 1., 20., 1., 1., 20., 0.015, 0.015)),
                        ]),
                        transform_pre=transforms.Compose([
                            transforms.RandomCrop(32, padding=4),
                            transforms.RandomHorizontalFlip(),
                        ]),
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                                 (0.2023, 0.1994, 0.2010)),
                        ]))

assert train_dataset
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=opt.batchSize,
                                               shuffle=True,
                                               num_workers=int(opt.workers))
def getOutDistClassDataset(class_num, root, shift, scale, fillcolor, download, resample, matrix_transform, transform_pre, transform):
    """Return the test-split samples whose label differs from class_num.

    Builds the affine-transform CIFAR-10 test set and keeps every sample whose
    last tuple element (the label) is not class_num — the out-of-distribution
    set for a one-class experiment.
    """
    cifar10_data = CIFAR10(root=root, shift=shift, scale=scale, fillcolor=fillcolor, download=download, train=False, resample=resample, matrix_transform=matrix_transform, transform_pre=transform_pre, transform=transform)
    # Fetch each sample exactly once.  The original indexed the dataset twice
    # per i (running the possibly-random transforms twice), so the returned
    # sample was not the one whose label had been tested.
    samples = (cifar10_data[i] for i in range(len(cifar10_data)))
    return [sample for sample in samples if sample[-1] != class_num]
from cnn import CnnSearchSpace
from search_algorithms import AgingEvoSearch

search_algorithm = AgingEvoSearch


def lr_schedule(epoch):
    """Piecewise-constant learning rate: 0.01 for epochs 0-34, 0.005 for
    epochs 35-64, and 0.001 otherwise (including negative epochs)."""
    rate = 0.001
    if 0 <= epoch < 35:
        rate = 0.01
    elif 35 <= epoch < 65:
        rate = 0.005
    return rate


# Binary CIFAR-10 training recipe: SGDW, 80 epochs, batch size 128, no
# learning-rate callbacks (constant lr=0.01).
training_config = TrainingConfig(
    dataset=CIFAR10(binary=True),
    optimizer=lambda: tfa.optimizers.SGDW(lr=0.01, momentum=0.9, weight_decay=1e-5),
    batch_size=128,
    epochs=80,
    callbacks=lambda: []
)

# Aging-evolution search over the CNN search space; progress is checkpointed.
search_config = AgingEvoConfig(
    search_space=CnnSearchSpace(),
    checkpoint_dir="artifacts/cnn_cifar10"
)

bound_config = BoundConfig(
    error_bound=0.3,
    peak_mem_bound=3000,
    model_size_bound=2000,