# Forming a mixed data batch
        # Loop-body fragment (outer loop header not visible here): one stream
        # step. Draws one batch from a randomly chosen stream class, mixes it
        # with `real_batches` batches of replayed real data, and performs one
        # classifier optimization step on the concatenation.
        #if idx_stream*opts['batch_size'] > max_interval_duration:
        #  break
        # Pick one of the interval's active classes uniformly at random.
        current_class = stream_classes[np.random.randint(classes_per_interval)]
        # NOTE(review): next(iter(loader)) rebuilds the DataLoader iterator on
        # every call, so only the first batch of a fresh shuffle is ever drawn
        # - confirm this sampling behavior is intended.
        (X_stream, Y_stream,
         _) = next(iter(stream_loaders[str(current_class)]))
        mixed_batch_data = []
        mixed_batch_labels = []
        mixed_batch_reconstruction_counter = []  # collected but never filled/used in this fragment
        mixed_batch_data.append(X_stream.cuda())
        mixed_batch_labels.append(Y_stream.long().cuda())
        # Append `real_batches` replay batches of previously seen real data.
        for idx_real in range(real_batches):
            real_batch = next(iter(real_loader))
            mixed_batch_data.append(real_batch[0].cuda())
            mixed_batch_labels.append(real_batch[1].long().cuda())
        # Keep the raw stream batch for future replay; third argument 0 is
        # presumably a reconstruction counter - confirm against Data_Buffer.
        real_buffer.add_batch(X_stream, current_class, 0)
        nb_of_batches = len(mixed_batch_labels)
        #print('Batches forming a big one: {}'.format(nb_of_batches))
        # Flatten the stacked batches into a single
        # (batch_size * nb_of_batches, feature_size) tensor; assumes every
        # batch holds exactly opts['batch_size'] rows - TODO confirm (the
        # drop_last=True loaders elsewhere in this file suggest it holds).
        inputs = torch.stack(mixed_batch_data).reshape(
            opts['batch_size'] * nb_of_batches, feature_size)
        labels = torch.stack(mixed_batch_labels).reshape(opts['batch_size'] *
                                                         nb_of_batches)

        # Updating the classifier
        outputs = classifier(inputs)
        #classification_loss = classification_criterion(outputs, labels)
        classification_loss = classification_criterion(outputs, labels)
        #classification_loss.backward(retain_graph=True)
        classification_loss.backward()
        classification_optimizer.step()
        classification_optimizer.zero_grad()
# --- Esempio n. 2 (scraped-example boundary marker; stray "0" pagination residue removed) ---
# Optimizer and losses for the generative model: Adam with mild weight decay,
# and MSE for both the latent-classification and reconstruction objectives.
generative_optimizer = torch.optim.Adam(
    gen_model.parameters(),
    lr=opts['learning_rate'],
    betas=(0.9, 0.999),
    weight_decay=1e-5,
)
# Module.cuda() returns the module itself, so construction and device
# placement can be chained in one expression.
generative_criterion_cl = nn.MSELoss().cuda()
generative_criterion_rec = nn.MSELoss().cuda()

# ---------------------------------- FILLING THE BUFFERS WITH THE HISTORICAL DATA ----------------------------------------------
# Replay buffers for the previously seen classes:
#   historical_buffer - latent codes produced by the encoder
#   real_buffer       - raw input batches
prev_classes = [0, 1, 2, 3, 4]
historical_buffer = Data_Buffer(60, 100)
real_buffer = Data_Buffer(1, 100)
for idx_class in prev_classes:
    indices_prev = get_indices_for_classes(trainset, [idx_class])
    prev_loader = DataLoader(trainset,
                             batch_size=opts['batch_size'],
                             sampler=SubsetRandomSampler(indices_prev),
                             drop_last=True)
    for batch, _ in prev_loader:  # labels are implied by idx_class
        batch = batch.cuda()
        # no_grad + detach() replaces the legacy `.data` attribute: the stored
        # codes stay out of the autograd graph without `.data`'s risk of
        # silently corrupting gradient computation.
        with torch.no_grad():
            codes = gen_model.encoder(batch)
        historical_buffer.add_batch(codes.detach(), idx_class)
        real_buffer.add_batch(batch, idx_class)

# Pre-stream sanity check: classifier accuracy on every class already present
# in the historical buffer, measured on real and on reconstructed test data.
max_accuracy = 0
fake_batches = 10
real_batches = 1
known_classes = [int(k) for k in historical_buffer.dbuffer]
indices_test = get_indices_for_classes(testset, known_classes)
test_loader = DataLoader(testset,
                         batch_size=1000,
                         sampler=SubsetRandomSampler(indices_test))
acc_real = test_classifier(classifier, test_loader)
acc_fake = test_classifier_on_generator(classifier, gen_model, test_loader)
print('Real test accuracy on known classes prior to stream training: {:.8f}'.format(acc_real))
print('Reconstructed test accuracy on known classes prior to stream training: {:.8f}'.format(acc_fake))

# --------------------------------------------------- STREAM TRAINING ----------------------------------------------------------
stream_duration = 100
# NOTE(review): the assignment line was dropped from the original text,
# leaving a dangling path string and ')'. Reconstructed as torch.load of the
# dataset dict consumed by the two TensorDataset calls below - confirm against
# the original source.
full_data = torch.load(
    './data/Synthetic/data_train_test_500_classes_128_features_2000_samples.pth'
)
trainset = TensorDataset(full_data['data_train'], full_data['labels_train'])
testset = TensorDataset(full_data['data_test'], full_data['labels_test'])

# Re-pack the training set class-by-class into the data buffer.
print('Reshaping data into readable format')
prev_classes = list(range(500))
data_buffer = Data_Buffer(4, opts['batch_size'])
for idx_class in prev_classes:
    class_indices = get_indices_for_classes(trainset, [idx_class])
    class_loader = DataLoader(trainset,
                              sampler=SubsetRandomSampler(class_indices),
                              batch_size=opts['batch_size'],
                              drop_last=True)
    for batch, _ in class_loader:  # labels are implied by idx_class
        data_buffer.add_batch(batch.cuda(), idx_class)

print('Ended reshaping')
# Initializing data loaders over the full training and test sets

# Training loader shuffles each epoch; the test loader keeps dataset order.
train_loader = DataLoader(trainset, batch_size=opts['batch_size'], shuffle=True)
test_loader = DataLoader(testset, batch_size=opts['batch_size'], shuffle=False)

# Initializing classification model
classifier = Classifier_128_features(nb_of_classes)
classifier_dict = torch.load(
    './pretrained_models/full_classifier_synthetic_data.pth')
classifier.load_state_dict(classifier_dict)
classifier.cuda()