Example #1
classification_criterion.cuda()

gen_model = autoencoder_MNIST_512_features(32)
gen_dict = torch.load('./pretrained_models/AE_5_classes_32_code_size.pth')
gen_model.load_state_dict(gen_dict)
gen_model.cuda()

generative_optimizer = torch.optim.Adam(gen_model.parameters(),
                                        lr=opts['learning_rate'],
                                        betas=(0.9, 0.999),
                                        weight_decay=1e-5)
generative_criterion_cl = nn.MSELoss()
generative_criterion_cl.cuda()
generative_criterion_rec = nn.MSELoss()
generative_criterion_rec.cuda()

# ---------------------------------- FILLING THE BUFFERS WITH THE HISTORICAL DATA ----------------------------------------------
prev_classes = [0, 1, 2, 3, 4]
historical_buffer = Data_Buffer(60, 100)
real_buffer = Data_Buffer(1, 100)
for idx_class in prev_classes:
    indices_prev = get_indices_for_classes(trainset, [idx_class])
    prev_loader = DataLoader(trainset,
                             batch_size=opts['batch_size'],
                             sampler=SubsetRandomSampler(indices_prev),
                             drop_last=True)
    for batch, label in prev_loader:
        historical_buffer.add_batch(gen_model.encoder(batch.cuda()).data, idx_class)
        real_buffer.add_batch(batch.cuda(), idx_class)

max_accuracy = 0
fake_batches = 10
real_batches = 1
known_classes = [int(a) for a in historical_buffer.dbuffer.keys()]
indices_test = get_indices_for_classes(testset, known_classes)
test_loader = DataLoader(testset, batch_size=1000, sampler=SubsetRandomSampler(indices_test))
acc_real = test_classifier(classifier, test_loader)
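
For reference, a minimal sketch of an accuracy helper in the spirit of the test_classifier call above (a hypothetical stand-in, not necessarily the sup_functions implementation; it assumes the classifier returns one logit per class):

import torch

def test_classifier_sketch(classifier, loader):
    # Plain accuracy loop over a DataLoader of (batch, label) pairs
    classifier.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for batch, label in loader:
            batch, label = batch.cuda(), label.cuda().long()
            preds = classifier(batch).argmax(dim=1)  # assumed: raw logits output
            correct += (preds == label).sum().item()
            total += label.size(0)
    return 100.0 * correct / total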
Example #2
gen_dict = torch.load(
    './pretrained_models/AE_5_classes_32_code_size_MNIST.pth')
gen_model.load_state_dict(gen_dict)
gen_model.cuda()

generative_optimizer = torch.optim.Adam(gen_model.parameters(),
                                        lr=opts['learning_rate'],
                                        betas=(0.9, 0.999),
                                        weight_decay=1e-5)
generative_criterion_cl = nn.MSELoss()
generative_criterion_cl.cuda()
generative_criterion_rec = nn.MSELoss()
generative_criterion_rec.cuda()

# ---------------------------------- FILLING THE BUFFERS WITH THE HISTORICAL DATA ----------------------------------------------

codes_storage = Data_Buffer(codes_storage_size, opts['batch_size'])
codes_storage.cuda_device = cuda_device

real_buffer = Data_Buffer(real_buffer_size, opts['batch_size'])
real_buffer.cuda_device = cuda_device
"""
Filling in historical buffers
"""
pretrained_on_classes = list(range(5))
real_buffer.load_from_tensor_dataset(full_original_trainset,
                                     pretrained_on_classes)
codes_storage.load_from_tensor_dataset(full_original_trainset,
                                       pretrained_on_classes,
                                       gen_model.encoder)

print('Historical data successfully loaded from the datasets')
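
The load_from_tensor_dataset call used here condenses the per-class filling loop written out in Example #1. A hypothetical sketch of the equivalent logic, built only from the Data_Buffer.add_batch and get_indices_for_classes usage shown in these examples (the actual method may differ):

from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler

def fill_buffer_sketch(buffer, dataset, classes, batch_size, encoder=None):
    # One SubsetRandomSampler per class, as in the loop from Example #1
    for idx_class in classes:
        indices = get_indices_for_classes(dataset, [idx_class])
        loader = DataLoader(dataset, batch_size=batch_size,
                            sampler=SubsetRandomSampler(indices),
                            drop_last=True)
        for batch, label in loader:
            batch = batch.cuda()
            if encoder is not None:
                # Store latent codes rather than raw inputs, as codes_storage does
                batch = encoder(batch).data
            buffer.add_batch(batch, idx_class)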
Example #3
Classifier = models.Classifier_LSUN
test_classifier = sup_functions.test_classifier
test_classifier_on_generator = sup_functions.test_classifier_on_generator
get_indices_for_classes = sup_functions.get_indices_for_classes

trainset = torch.load('../datasets/LSUN/trainset.pth')
testset = torch.load('../datasets/LSUN/testset.pth')

original_trainset = TensorDataset(trainset[0], trainset[1], torch.zeros(trainset[1].shape))
trainset = TensorDataset(trainset[0], trainset[1], torch.zeros(trainset[1].shape))
testset = TensorDataset(testset[0], testset[1])

# Loading the datasets
print('Reshaping data into readable format')
hist_buffer = Data_Buffer(real_batches_to_add or 1, opts['batch_size'])
hist_buffer.load_from_tensor_dataset(trainset, list(range(30)))
hist_buffer.make_tensor_dataset()
hist_data = hist_buffer.tensor_dataset

data_buffer = Data_Buffer(batches_per_class, opts['batch_size'])
data_buffer.load_from_tensor_dataset(trainset, list(range(30)))                                         
data_buffer.cuda_device = cuda_device

print('Ended reshaping')
# Initializing data loaders for first 5 classes

#train_loader = DataLoader(original_trainset, batch_size=opts['batch_size'], shuffle=True)
test_loader = DataLoader(testset, batch_size=opts['batch_size'], shuffle=False)

def get_indices_for_classes(data, data_classes):
    # Indices of all samples whose label belongs to data_classes
    indices = torch.FloatTensor(
        list((data.tensors[1].long() == class_).tolist()
             for class_ in data_classes)).sum(0).nonzero().long().squeeze()
    return indices


trainset = torch.load('../datasets/LSUN/trainset.pth')
testset = torch.load('../datasets/LSUN/testset.pth')

original_trainset = TensorDataset(trainset[0], trainset[1],
                                  torch.zeros(trainset[1].shape))
testset = TensorDataset(testset[0], testset[1])

# Loading the datasets
print('Reshaping data into readable format')
data_buffer = Data_Buffer(batches_per_class, opts['batch_size'])
data_buffer.add_batches_from_dataset(original_trainset, list(range(30)),
                                     batches_per_class)
data_buffer.cuda_device = cuda_device

orig_buffer = Data_Buffer(real_batches_to_add, opts['batch_size'])
orig_buffer.add_batches_from_dataset(original_trainset, list(range(30)),
                                     real_batches_to_add)
orig_buffer.cuda_device = cuda_device
original_trainset = orig_buffer.make_tensor_dataset()

print('Ended reshaping')
# Initializing data loaders for first 5 classes

#train_loader = DataLoader(original_trainset, batch_size=opts['batch_size'], shuffle=True)
test_loader = DataLoader(testset, batch_size=opts['batch_size'], shuffle=False)
# ----------------------------------------- LOADING PRETRAINED MODELS ----------------------------------------------------------
# Initializing classification model
classifier = models.Classifier_MNIST(nb_of_classes)
class_dict = torch.load('./pretrained_models/classifier_5_MNIST.pth')
classifier.load_state_dict(class_dict)
classification_optimizer = optim.Adam(classifier.parameters(),
                                      lr=opts['learning_rate'],
                                      betas=(0.9, 0.999),
                                      weight_decay=1e-5)
classification_criterion = nn.CrossEntropyLoss()
classifier.cuda()
classification_criterion.cuda()

# ---------------------------------- FILLING THE BUFFERS WITH THE HISTORICAL DATA ----------------------------------------------

real_buffer = Data_Buffer(real_buffer_size, opts['batch_size'])
real_buffer.cuda_device = cuda_device
"""
Filling in historical buffers
"""
pretrained_on_classes = list(range(5))
real_buffer.load_from_tensor_dataset(full_original_trainset,
                                     pretrained_on_classes)

print('Historical data successfully loaded from the datasets')

max_accuracy = 0
print('Testing already acquired knowledge prior to stream training')

known_classes = pretrained_on_classes
indices_test = sup_functions.get_indices_for_classes(full_original_testset,
                                                      known_classes)


def get_indices_for_classes(data, data_classes):
    # Shuffled indices of all samples whose label belongs to data_classes
    indices = torch.FloatTensor(
        list((data.tensors[1].long() == class_).tolist()
             for class_ in data_classes)).sum(0).nonzero().long().squeeze()
    return indices[torch.randperm(len(indices))]


# Loading the datasets
full_data = torch.load(
    './data/Synthetic/data_train_test_500_classes_128_features_2000_samples.pth'
)
trainset = TensorDataset(full_data['data_train'], full_data['labels_train'])
testset = TensorDataset(full_data['data_test'], full_data['labels_test'])

print('Reshaping data into readable format')
prev_classes = list(range(500))
data_buffer = Data_Buffer(4, opts['batch_size'])
for idx_class in prev_classes:
    indices_prev = get_indices_for_classes(trainset, [idx_class])
    prev_loader = DataLoader(trainset,
                             batch_size=opts['batch_size'],
                             sampler=SubsetRandomSampler(indices_prev),
                             drop_last=True)
    for batch, label in prev_loader:
        data_buffer.add_batch(batch.cuda(), idx_class)

print('Ended reshaping')
# Initializing data loaders for first 5 classes

train_loader = DataLoader(trainset,
                          batch_size=opts['batch_size'],
                          shuffle=True)
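
Once the buffer is filled, the make_tensor_dataset() / .tensor_dataset pair used with hist_buffer earlier in this example can turn the stored batches back into a loader for rehearsal. A hedged sketch of that step (the exact tensors exposed depend on Data_Buffer internals):

# Rebuild a dataset from the filled buffer and wrap it in a loader,
# reusing only calls already shown above; the attribute layout is assumed.
data_buffer.make_tensor_dataset()
buffer_loader = DataLoader(data_buffer.tensor_dataset,
                           batch_size=opts['batch_size'],
                           shuffle=True)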