Example No. 1
0
    # Build the dataset object for the requested dataset name.
    dataset = data_handler.DatasetFactory.get_dataset(args.dataset)
    # CIFAR100 needs no custom image loader; other datasets provide one.
    # NOTE(review): inferred from this branch only — confirm against DatasetFactory.
    if args.dataset == 'CIFAR100':
        loader = None
    else:
        loader = dataset.loader

    # Loader used for training data ####################################################
    # Deterministically shuffle the class order (seeded by args.seed) so the
    # incremental steps see a reproducible class sequence across runs.
    shuffle_idx = shuffle(np.arange(dataset.classes), random_state=args.seed)
    print(shuffle_idx)
    # Incremental training loader: serves classes in `shuffle_idx` order,
    # `args.base_classes` first, then `args.step_size` new classes per task,
    # keeping at most `args.memory_budget` exemplars of old classes.
    train_dataset_loader = data_handler.IncrementalLoader(dataset.train_data,
                                                          dataset.train_labels,
                                                          dataset.classes,
                                                          args.step_size,
                                                          args.memory_budget,
                                                          'train',
                                                          transform=dataset.train_transform,
                                                          loader=loader,
                                                          shuffle_idx=shuffle_idx,
                                                          base_classes=args.base_classes,
                                                          approach=args.trainer
                                                          )
    # Loader for evaluation — built over the same (shuffled) training data/labels.
    evaluate_dataset_loader = data_handler.IncrementalLoader(dataset.train_data,
                                                             dataset.train_labels,
                                                             dataset.classes,
                                                             args.step_size,
                                                             args.memory_budget,
                                                             'train',
                                                             transform=dataset.train_transform,
                                                             loader=loader,
                                                             shuffle_idx=shuffle_idx,
                                                             # NOTE(review): this call is truncated in the excerpt —
                                                             # the remaining keyword arguments are not visible here.
Example No. 2
0
# Fix the seed so CPU and CUDA RNG streams are reproducible across runs.
args.seed = seed
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)

# Loader used for training data.
# Deterministically shuffle the class order, then permute the dataset's labels
# to match, so each seed yields a different but reproducible class sequence.
shuffle_idx = shuffle(np.arange(dataset.classes), random_state=args.seed)
dataset.shuffle_data(shuffle_idx)
print("Label shuffled")
# print(shuffle_idx)

# Build the backbone network for this dataset and wrap it for multi-GPU use.
myModel = networks.ModelFactory.get_model(args.dataset)
myModel = torch.nn.DataParallel(myModel).cuda()

# Incremental loader feeds training tasks; result loader accumulates evaluation
# results (reset() clears any previous state before the run starts).
incremental_loader = data_handler.IncrementalLoader(dataset, args)
result_loader = data_handler.ResultLoader(dataset, args)
result_loader.reset()

# Get the required model
print(torch.cuda.device_count())
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")

# Trainer object used for training
myTrainer = trainer.TrainerFactory.get_trainer(incremental_loader, myModel,
                                               args)

# LR-decay epochs, number of incremental tasks, and epochs per task.
# tasknum: base task plus one task per step_size-sized chunk of remaining classes.
schedule = np.array(args.schedule)
tasknum = (dataset.classes - args.base_classes) // args.step_size + 1
total_epochs = args.nepochs
Example No. 3
0
import numpy as np
import torch as t
import data_handler

from trainer.evaluator import softmax_evaluator

# Fix the seed for reproducible evaluation, then load a trained checkpoint
# onto the GPU and switch it to inference mode.
t.manual_seed(100)
the_model = t.load('./checkpoint/0220_12-59-07.pth')
the_model.to(t.device('cuda'))
# print(the_model.parameters)
the_model.eval()
# Softmax-based evaluator (True presumably enables CUDA — confirm signature).
softmax = softmax_evaluator(True)
dataset = data_handler.DatasetFactory.get_dataset('CIFAR100')
# Test-set loader over CIFAR100.
# NOTE(review): this loader is constructed but never consumed below.
test_dataset_loader = data_handler.IncrementalLoader(
    dataset.test_data.test_data,
    dataset.test_data.test_labels,
    dataset.labels_per_class_test,
    dataset.classes, [1, 2],
    transform=dataset.test_transform,
    cuda=True,
    oversampling=False)

# BUG(review): `input` is never assigned in this excerpt, so these lines use the
# Python builtin `input` function — `the_model(input)` would raise at runtime.
# A batch drawn from `test_dataset_loader` was most likely intended here.
print(input)
output = the_model(input)
pred = output.data.max(
    1, keepdim=True)[1]  # get the index of the max log-probability
print(pred)
Example No. 4
0
# Use the first distillation weight and the first memory budget from the sweep lists.
args.alpha = args.alphas[0]
# Run an experiment corresponding to every memory budget.
args.memory_budget = args.memory_budgets[0]
# In LwF, memory_budget is 0 (See the paper "Learning without Forgetting" for details).
if args.lwf:
    args.memory_budget = 0
# Fix the seed.
torch.manual_seed(args.seed)  # seed the RNG so repeated runs produce identical random streams
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Loader used for training data.
train_dataset_loader = data_handler.IncrementalLoader(dataset.train_data.train_data,
                                                      dataset.train_data.train_labels,
                                                      dataset.labels_per_class_train,
                                                      dataset.classes, [1, 2],
                                                      transform=dataset.train_transform,
                                                      cuda=args.cuda, oversampling=args.upsampling,
                                                      )
# Special loader use to compute ideal NMC; i.e, NMC that using all the data points to compute the mean embedding
train_dataset_loader_nmc = data_handler.IncrementalLoader(dataset.train_data.train_data,
                                                          dataset.train_data.train_labels,
                                                          dataset.labels_per_class_train,
                                                          dataset.classes, [1, 2],
                                                          transform=dataset.train_transform,
                                                          cuda=args.cuda, oversampling=args.upsampling,
                                                          )
# Loader for test data.
# NOTE(review): this call is truncated in the excerpt — remaining arguments not visible.
test_dataset_loader = data_handler.IncrementalLoader(dataset.test_data.test_data,
                                                     dataset.test_data.test_labels,
                                                     dataset.labels_per_class_test, dataset.classes,