Example #1
def main():

    # Define image transformations
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
        #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
    ])

    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
        #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
    ])

    # Create the datasets. For now I take only the first 10 classes, for testing. I extended the
    # CIFAR100 class with a `classes` attribute, a list of labels: the dataset loads only the images with those labels.

    range_classes = np.arange(100)
    classes_groups = np.array_split(range_classes, 10)

    net = iCaRL(10)

    for i in range(1):  #range(int(100 / CLASSES_BATCH))

        print('classes_group', classes_groups[i])

        train_dataset = CIFAR100(root='data/',
                                 classes=classes_groups[i],
                                 train=True,
                                 download=True,
                                 transform=train_transform)
        test_dataset = CIFAR100(root='data/',
                                classes=classes_groups[i],
                                train=False,
                                download=True,
                                transform=test_transform)

        print(len(train_dataset))
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True,
                                      drop_last=True,
                                      num_workers=4)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=BATCH_SIZE,
                                     shuffle=False,
                                     drop_last=False,
                                     num_workers=4)

        net.update_representation(dataset=train_dataset)
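
The comments in this example refer to a CIFAR100 subclass extended with a `classes` attribute, but the subclass itself is not shown on this page. Below is a minimal sketch of what it might look like, assuming torchvision's CIFAR100 as the base; the attribute name and filtering logic are inferred from the comments, not the project's actual code.

# Sketch (assumption): a torchvision CIFAR100 subclass that keeps only the
# samples whose labels appear in `classes`, as the comments above describe.
import numpy as np
from torchvision.datasets import CIFAR100 as TorchvisionCIFAR100

class CIFAR100(TorchvisionCIFAR100):
    def __init__(self, root, classes=None, train=True, download=False, transform=None):
        super().__init__(root=root, train=train, download=download, transform=transform)
        if classes is not None:
            # Boolean mask over the integer labels; drop every other sample
            mask = np.isin(np.array(self.targets), np.asarray(classes))
            self.data = self.data[mask]
            self.targets = np.array(self.targets)[mask].tolist()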
Example #2
def get_datasets(classes):
    # Define image transformations (uses the module-level train_transform / test_transform)

    train_dataset = CIFAR100(root='data/', classes=classes, train=True, download=True, transform=train_transform)
    test_dataset = CIFAR100(root='data/', classes=classes,  train=False, download=True, transform=test_transform)

    train_indices, val_indices = train_test_split(range(len(train_dataset)), test_size=0.1, stratify=train_dataset.targets)

    val_dataset = Subset(copy.deepcopy(train_dataset), val_indices)
    train_dataset = Subset(train_dataset, train_indices)

    val_dataset.dataset.transform = test_transform

    return train_dataset, val_dataset, test_dataset
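
The deepcopy above is deliberate: the validation subset must use test_transform, and without a copy, reassigning `.dataset.transform` would also disable augmentation for the training subset, since both Subsets would otherwise share one underlying dataset object. A hedged usage sketch follows; BATCH_SIZE and the DataLoader import are assumed from the other examples on this page.

# Hypothetical caller of get_datasets
train_ds, val_ds, test_ds = get_datasets(classes=np.arange(10))
train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True, drop_last=True, num_workers=4)
val_loader = DataLoader(val_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
test_loader = DataLoader(test_ds, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)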
Example #3
def main():

    # Define image transformations
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
        #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
    ])

    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
        #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
    ])

    # Create the datasets. For now I take only the first 10 classes, for testing. I extended the
    # CIFAR100 class with a `classes` attribute, a list of labels: the dataset loads only the images with those labels.

    range_classes = np.arange(100)
    classes_groups = np.array_split(range_classes, 10)

    #net = resnet18()
    net = resnet32()

    for i in range(int(100 / CLASSES_BATCH)):
        # Change the number of output classes
        net.fc = nn.Linear(64, 10 + i * 10)

        if i != 0:

            #creating dataset for current iteration
            train_dataset = CIFAR100(root='data/',
                                     classes=classes_groups[i],
                                     train=True,
                                     download=True,
                                     transform=train_transform)
            test_dataset = CIFAR100(root='data/',
                                    classes=classes_groups[i],
                                    train=False,
                                    download=True,
                                    transform=test_transform)

            #creating dataset for test on previous classes
            previous_classes = np.array([])
            for j in range(i):
                previous_classes = np.concatenate(
                    (previous_classes, classes_groups[j]))
            test_prev_dataset = CIFAR100(root='data/',
                                         classes=previous_classes,
                                         train=False,
                                         download=True,
                                         transform=test_transform)

            #creating dataset for all classes
            all_classes = np.concatenate((previous_classes, classes_groups[i]))
            test_all_dataset = CIFAR100(root='data/',
                                        classes=all_classes,
                                        train=False,
                                        download=True,
                                        transform=test_transform)

            #creating dataloaders
            train_dataloader = DataLoader(train_dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True,
                                          drop_last=True,
                                          num_workers=4)
            test_dataloader = DataLoader(test_dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         drop_last=False,
                                         num_workers=4)
            test_prev_dataloader = DataLoader(test_prev_dataset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=False,
                                              drop_last=False,
                                              num_workers=4)
            test_all_dataloader = DataLoader(test_all_dataset,
                                             batch_size=BATCH_SIZE,
                                             shuffle=False,
                                             drop_last=False,
                                             num_workers=4)

            net = train(net, train_dataloader)
            print('Test on new classes')
            test(net, test_dataloader)
            print('Test on old classes')
            test(net, test_prev_dataloader)
            print('Test on all classes')
            test(net, test_all_dataloader)

        else:
            train_dataset = CIFAR100(root='data/',
                                     classes=classes_groups[i],
                                     train=True,
                                     download=True,
                                     transform=train_transform)
            test_dataset = CIFAR100(root='data/',
                                    classes=classes_groups[i],
                                    train=False,
                                    download=True,
                                    transform=test_transform)
            train_dataloader = DataLoader(train_dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True,
                                          drop_last=True,
                                          num_workers=4)
            test_dataloader = DataLoader(test_dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         drop_last=False,
                                         num_workers=4)
            net = train(net, train_dataloader)
            print('Test on first 10 classes')
            test(net, test_dataloader)

        if i == 1:
            return  # run only the first iteration (10 classes) until the results are replicated
Example #4
def get_additional_datasets(prev_classes, all_classes):

    test_prev_dataset = CIFAR100(root='data/', classes=prev_classes,  train=False, download=True, transform=test_transform)
    test_all_dataset = CIFAR100(root='data/', classes=all_classes,  train=False, download=True, transform=test_transform)

    return test_prev_dataset, test_all_dataset
Example #5
                                nesterov=args.nesterov,
                                weight_decay=args.weight_decay)
    net.to('cuda')
    if torch.cuda.device_count() > 1:
        net = torch.nn.DataParallel(net)
    cudnn.benchmark = True
    criterion = nn.CrossEntropyLoss().cuda()
    # trainer
    if args.adversarial:
        if args.regu == 'no':
            trainer = AdversarialTrainer(net, criterion, optimizer, args)
        elif args.regu == 'random-svd':
            trainer = AdversarialOrthReguTrainer(net, criterion, optimizer,
                                                 args)
        else:
            raise Exception('Invalid setting for adversarial training')
    else:
        if args.regu == 'no':
            trainer = Trainer(net, criterion, optimizer, args)
        elif args.regu == 'random-svd':
            trainer = OrthReguTrainer(net, criterion, optimizer, args)
        else:
            raise Exception('Invalid regularization term')
    # data
    if args.dataset == 'cifar100':
        data = CIFAR100(root=args.data, batch_size=args.batch_size)
    else:
        data = CIFAR10(root=args.data, batch_size=args.batch_size)
    # start
    best_acc = trainer.run(data, args.epochs)
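
This snippet reads many attributes from `args`. Below is a plausible argparse setup consistent with the attribute names used above; the flags, choices, and defaults are assumptions inferred from the code, not the project's actual CLI.

# Sketch (assumption): argparse flags matching the args.* attributes referenced above.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--data', default='./data')
parser.add_argument('--dataset', choices=['cifar10', 'cifar100'], default='cifar100')
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--nesterov', action='store_true')
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--adversarial', action='store_true')
parser.add_argument('--regu', choices=['no', 'random-svd'], default='no')
args = parser.parse_args()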
Example #6
File: evaluate.py Project: ATPGN/ATPGN
def evaluate(dataset='CIFAR100'):
    batch_size = 128
    test_num = 10000
    defense_list = ['Naive', 'Goodfellow', 'Madry', 'PGN']
    model_path_list = []
    for defense in defense_list:
        for i in os.listdir('save/%s/%s' % (dataset, defense)):
            if os.path.exists('save/%s/%s/%s/model.joblib' %
                              (dataset, defense, i)):
                model_path_list.append('save/%s/%s/%s/model.joblib' %
                                       (dataset, defense, i))

    if dataset == 'CIFAR100':
        data = CIFAR100(test_start=0, test_end=test_num)
        x_test, y_test = data.get_set('test')
        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.float32, shape=(None, 100))
    elif dataset == 'CIFAR10':
        data = CIFAR10(test_start=0, test_end=test_num)
        x_test, y_test = data.get_set('test')
        x = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
        y = tf.placeholder(tf.float32, shape=(None, 10))

    sess = tf.Session()

    cw_params = {
        'batch_size': 128,
        'clip_min': 0.,
        'clip_max': 1.,
        'max_iterations': 100,
        'y': y
    }

    eval_params = {'batch_size': batch_size}

    def do_eval(preds, x_set, y_set, report_text):
        acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
        print('Test accuracy on %s: %0.4f' % (report_text, acc))
        return acc

    def get_adv_x_numpy(adv_x, attack_success_index, x_set, y_set):
        result = []
        result_index = []
        nb_batches = int(math.ceil(float(len(x_set)) / batch_size))
        X_cur = np.zeros((batch_size, ) + x_set.shape[1:], dtype=x_set.dtype)
        Y_cur = np.zeros((batch_size, ) + y_set.shape[1:], dtype=y_set.dtype)
        for batch in range(nb_batches):
            start = batch * batch_size
            end = min(len(x_set), start + batch_size)
            cur_batch_size = end - start
            X_cur[:cur_batch_size] = x_set[start:end]
            Y_cur[:cur_batch_size] = y_set[start:end]
            feed_dict = {x: X_cur, y: Y_cur}
            adv_x_numpy, success_index = sess.run(
                [adv_x, attack_success_index], feed_dict=feed_dict)
            result.append(adv_x_numpy[:cur_batch_size])
            result_index.append(success_index[:cur_batch_size])
        return np.concatenate(result, axis=0), np.concatenate(result_index,
                                                              axis=0)

    print(model_path_list)
    acc_dict = {}
    l2mean_dict = {}
    for model_path in model_path_list:
        defense = model_path.split('/')[2]
        if defense not in acc_dict:
            acc_dict[defense] = []
        if defense not in l2mean_dict:
            l2mean_dict[defense] = []

        if os.path.exists(
                os.path.join(os.path.dirname(model_path), 'cash_result')):
            with open(os.path.join(os.path.dirname(model_path), 'cash_result'),
                      'r') as f:
                cash_result_str = f.read()
                acc, l2mean, model_create_time = cash_result_str.split(",")

            if int(model_create_time) == int(os.path.getctime(model_path)):
                acc_dict[defense].append(float(acc))
                l2mean_dict[defense].append(float(l2mean))
                print(model_path, acc, l2mean)
                continue

        with sess.as_default():
            model = load(model_path)

        attack_model = CarliniWagnerL2(model, sess=sess)
        attack_params = cw_params

        preds = model.get_logits(x)
        acc = do_eval(preds, x_test[:test_num], y_test[:test_num],
                      'DEFENSE : %s' % defense)
        adv_x = attack_model.generate(x, **attack_params)
        preds_adv = model.get_logits(adv_x)
        attack_success_index = tf.math.not_equal(tf.argmax(preds_adv, axis=-1),
                                                 tf.argmax(y, axis=-1))
        adv_x_numpy, success_index = get_adv_x_numpy(adv_x,
                                                     attack_success_index,
                                                     x_test[:test_num],
                                                     y_test[:test_num])
        print('C&W attack success_rate = %f' % np.mean(success_index))

        l2mean = np.mean(
            np.sqrt(
                np.sum(np.power(
                    adv_x_numpy[success_index] -
                    x_test[:test_num][success_index], 2),
                       axis=(1, 2, 3))))

        acc_dict[defense].append(acc)
        l2mean_dict[defense].append(l2mean)
        print(model_path, acc, l2mean)
        with open(os.path.join(os.path.dirname(model_path), 'cash_result'),
                  'w') as f:
            f.write('%.4f,%.4f,%d' %
                    (acc, l2mean, os.path.getctime(model_path)))

    for defense in defense_list:
        if defense not in l2mean_dict:
            continue

        l2mean_dict[defense] = np.array(l2mean_dict[defense])
        acc_dict[defense] = np.array(acc_dict[defense])
        arg_l2mean_dict = np.argsort(l2mean_dict[defense])
        l2mean_dict[defense] = l2mean_dict[defense][arg_l2mean_dict]
        acc_dict[defense] = acc_dict[defense][arg_l2mean_dict]
        plt.plot(l2mean_dict[defense], acc_dict[defense], '-o', label=defense)
    plt.legend()
    plt.xlabel('$\\rho_{cw}$')
    plt.ylabel('benign accuracy')
    plt.title("RESULT FOR %s" % dataset)

    fig_save_dir = 'evaluate/%s' % dataset
    if not os.path.exists(fig_save_dir):
        os.makedirs(fig_save_dir)
    plt.savefig('%s/robustness-curve.png' % fig_save_dir)
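
The `l2mean` expression above computes the mean per-image L2 distance between the successful adversarial examples and their benign counterparts. An equivalent, more explicit rewrite of that reduction, using the same arrays as above:

# Per-image L2 distance over successfully attacked samples; shapes are (N, 32, 32, 3).
diff = adv_x_numpy[success_index] - x_test[:test_num][success_index]
l2_per_image = np.sqrt(np.sum(diff ** 2, axis=(1, 2, 3)))  # one L2 norm per image
l2mean = l2_per_image.mean()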
Example #7
File: LWF.py Project: alallala/IL_project
def main():
    # Define image transformations
    train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4),
                                          transforms.RandomHorizontalFlip(),
                                          transforms.ToTensor(),
                                          transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                          #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
                                          ])

    test_transform = transforms.Compose([transforms.ToTensor(),
                                         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
                                         #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
                                         ])

    # Create the datasets. For now I take only the first classes, for testing. I extended the
    # CIFAR100 class with a `classes` attribute, a list of labels: the dataset loads only
    # the images with those labels.

    #range_classes = np.arange(100)
    total_classes = 20   # try with 2 iterations
    perm_id = np.random.permutation(total_classes)
    all_classes = np.arange(total_classes)

    # Shuffle the class indices
    for i in range(len(all_classes)):
        all_classes[i] = perm_id[all_classes[i]]

    # Create groups of 10
    classes_groups = np.array_split(all_classes, 10)
    print(classes_groups)

    # Create class map
    class_map = {}
    for i, cl in enumerate(all_classes):
        class_map[cl] = i
    print("Class map:", class_map)

    # Create reversed class map
    map_reverse = {}
    for cl, map_cl in class_map.items():
        map_reverse[map_cl] = int(cl)
    print("Map Reverse:", map_reverse)

    # Create network
    net = LwF(NUM_CLASSES, class_map)

    #for i in range(int(total_classes//CLASSES_BATCH)):

    for s in range(0, total_classes, NUM_CLASSES):
        # Load datasets
        print('Iteration: ', s)
        print("Loading training examples for classes", all_classes[s:s + NUM_CLASSES])
        train_dataset = CIFAR100(root='data', train=True, classes=all_classes[s:s + NUM_CLASSES], download=True, transform=train_transform)
        train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
        test_dataset = CIFAR100(root='data', train=False, classes=all_classes[:s + NUM_CLASSES], download=True, transform=test_transform)
        test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

        '''for i in range(1):

            train_dataset = CIFAR100(root='data/', classes=classes_groups[i], train=True, download=True, transform=train_transform)
            test_dataset = CIFAR100(root='data/', classes=classes_groups[i],  train=False, download=True, transform=test_transform)

            #train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True, num_workers=4)
            test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, drop_last=False, num_workers=4)'''

        # UPDATE STEP on the training set
        net.update(train_dataset, class_map)
        # net.update(dataset=train_dataset)

        # EVALUATION STEP on the training set and the test set
        net.eval()

        net.n_known = net.n_classes

        print("model classes : %d, " % net.n_known)

        total = 0.0
        correct = 0.0

        for images, labels, indices in train_dataloader:
            images = Variable(images).cuda()
            preds = net.classify(images)
            preds = [map_reverse[pred] for pred in preds.cpu().numpy()]
            total += labels.size(0)
            correct += (preds == labels.numpy()).sum()

        # Train accuracy
        #print('%.2f ,' % (100.0 * correct / total), file=file, end="")
        print('Train Accuracy : %.2f ,' % (100.0 * correct / total))

        # net.classify(...)
        total = 0.0
        correct = 0.0
        for images, labels, indices in test_dataloader:
            images = Variable(images).cuda()
            preds = net.classify(images)
            preds = [map_reverse[pred] for pred in preds.cpu().numpy()]
            total += labels.size(0)
            correct += (preds == labels.numpy()).sum()

        # Test accuracy
        #print('%.2f' % (100.0 * correct / total), file=file)
        print('Test Accuracy : %.2f' % (100.0 * correct / total))

        net.train()
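
The class_map / map_reverse pair maps the shuffled CIFAR labels onto contiguous network output indices and back, which is why every prediction goes through map_reverse before being compared with the raw labels. A toy round-trip with a made-up permutation:

# Illustration only: hypothetical 3-class permutation
perm_id = np.array([2, 0, 1])
class_map = {int(cl): i for i, cl in enumerate(perm_id)}   # original label -> output index
map_reverse = {i: int(cl) for cl, i in class_map.items()}  # output index -> original label
assert all(map_reverse[class_map[int(c)]] == int(c) for c in perm_id)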
Example #8
def main():
    # Define image transformations
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
        #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
    ])

    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
        #transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
    ])

    print("\n")

    total_classes = 100

    perm_id = np.random.permutation(total_classes)
    all_classes = np.arange(total_classes)
    '''
    #mix the classes indexes
    for i in range(len(all_classes)):
      all_classes[i] = perm_id[all_classes[i]]

    #Create groups of 10
    #classes_groups = np.array_split(all_classes, 10)
    #print(classes_groups)

    #num_iters = total_classes//CLASSES_BATCH      
    # Create class map

    class_map = {}
    #takes 10 new classes randomly
    for i, cl in enumerate(all_classes):
        class_map[cl] = i
    print (f"Class map:{class_map}\n")     
    
    # Create class map reversed
    map_reverse = {}
    for cl, map_cl in class_map.items():
        map_reverse[map_cl] = int(cl)
    print (f"Map Reverse:{map_reverse}\n")
    '''

    # Create Network
    net = LwF(2048)

    # Iterate, CLASSES_BATCH classes at a time, until the net has seen total_classes

    for s in range(0, total_classes, CLASSES_BATCH):
        # this was range(0, num_iter, CLASSES_BATCH); changed because num_iter=10 and
        # CLASSES_BATCH=10 meant s went from 0 to 10 and stopped after a single step.
        # Now s starts at 0 and advances by 10 up to 100: 10 iterations in total.

        print(
            f"ITERATION: {(s//CLASSES_BATCH)+1} / {total_classes//CLASSES_BATCH}\n"
        )

        print("\n")

        # Load Datasets

        #train dataloader loads images from classes [0:10], then [10:20], etc.
        train_dataset = CIFAR100(root='data',
                                 train=True,
                                 classes=all_classes[s:s + CLASSES_BATCH],
                                 download=True,
                                 transform=train_transform)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=BATCH_SIZE,
                                      shuffle=True,
                                      num_workers=4)
        #test dataloader loads images from classes [0:10], then [0:20], etc.
        test_dataset = CIFAR100(root='data',
                                train=False,
                                classes=all_classes[:s + CLASSES_BATCH],
                                download=True,
                                transform=test_transform)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=BATCH_SIZE,
                                     shuffle=False,
                                     num_workers=4)

        net._before_task(train_dataloader)

        net._added_n_classes(CLASSES_BATCH)

        net.train()

        net._train_task(train_dataloader)

        net.eval()

        net.eval(test_dataloader)
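
Note the two consecutive eval calls: `net.eval()` with no arguments is presumably the standard nn.Module switch to evaluation mode, while `net.eval(test_dataloader)` must be a custom evaluation method of the LwF class that runs the test pass. That reading is an assumption, since the LwF class itself is not shown on this page.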
Example #9
os.makedirs(exp_path, exist_ok=True)

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409],
                         std=[0.2675, 0.2565, 0.2761]),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5071, 0.4866, 0.4409],
                         std=[0.2675, 0.2565, 0.2761]),
])

trainset = CIFAR100('./data', train=True, transform=transform_train)
valset = CIFAR100('./data', train=False, transform=transform_test)
num_classes = 100

train_loader = DataLoader(trainset, batch_size=args.batch_size,
                          shuffle=True, num_workers=3, pin_memory=True)
val_loader = DataLoader(valset, batch_size=args.batch_size,
                        shuffle=False, num_workers=3, pin_memory=True)

ckpt_path = osp.join('{}/ckpt/{}.pth'.format( \
                args.teacher_path, args.teacher_ckpt))
t_model = model_dict[teacher_arch](num_classes=num_classes).cuda()
state_dict = torch.load(ckpt_path)['state_dict']
t_model.load_state_dict(state_dict)
t_model.eval()
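
`model_dict` is not defined in this snippet; presumably it maps architecture-name strings to model constructors. A minimal sketch under that assumption (the entry is illustrative, reusing resnet32 from Example #3):

# Assumption: model_dict maps architecture names to model constructors
model_dict = {'resnet32': resnet32}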