Example #1
import os
from copy import deepcopy

import torch

import network  # project module providing the Net model definition

# `args` (the parsed command-line arguments) is expected to be defined in
# the surrounding script.


def get_model(final_model_id, test_data_id):
    # Load the task-specific test model
    test_net = network.Net(args)
    checkpoint_test = torch.load(
        os.path.join(args.checkpoint,
                     'model_{}.pth.tar'.format(test_data_id)))
    test_net.load_state_dict(checkpoint_test['model_state_dict'])

    # Load the final trained model
    net = network.Net(args)
    checkpoint = torch.load(
        os.path.join(args.checkpoint,
                     'model_{}.pth.tar'.format(final_model_id)))
    net.load_state_dict(checkpoint['model_state_dict'])

    # Replace the shared module with the final model's shared module
    final_shared = deepcopy(net.shared.state_dict())
    test_net.shared.load_state_dict(final_shared)
    test_net = test_net.to(args.device)

    return test_net
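
A minimal usage sketch (the IDs below are hypothetical; it assumes checkpoints were saved as model_<id>.pth.tar under args.checkpoint):

    # Evaluate task 2's model with the shared module swapped in from the
    # final checkpoint; the IDs are illustrative only.
    test_net = get_model(final_model_id=5, test_data_id=2)
    test_net.eval()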
Example #2
import os

import numpy as np
import torch

import datagenerator  # project module: dataset generators
import network        # project module: model definitions
import utils          # project module: accuracy/BWT logging helpers

# `approach` (the continual-learning method class) is expected to be
# imported by the surrounding script.


def run(args, run_id):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

        # Faster run but not deterministic:
        # torch.backends.cudnn.benchmark = True

        # For deterministic results matching the paper (at the cost of speed):
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Data loader
    print('Instantiate data generators and model...')
    dataloader = datagenerator.DatasetGen(args)
    args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
    if args.experiment == 'multidatasets':
        args.lrs = dataloader.lrs

    # Model
    net = network.Net(args)
    net = net.to(args.device)

    net.print_model_size()

    # Approach
    appr = approach(net, args, network=network)

    # Loop tasks
    acc = np.zeros((len(args.taskcla), len(args.taskcla)), dtype=np.float32)
    lss = np.zeros((len(args.taskcla), len(args.taskcla)), dtype=np.float32)

    for t, ncla in args.taskcla:  # ncla (classes per task) is unused here

        print('*' * 250)
        dataset = dataloader.get(t)
        print(' ' * 105, 'Dataset {:2d} ({:s})'.format(t + 1, dataset[t]['name']))
        print('*' * 250)

        # Train
        appr.train(t, dataset[t])
        print('-' * 250)
        print()

        for u in range(t+1):
            # Load previous model and replace the shared module with the current one
            test_model = appr.load_model(u)
            test_res = appr.test(dataset[u]['test'], u, model=test_model)

            print('>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'
                  .format(u, dataset[u]['name'], test_res['loss_t'],
                          test_res['acc_t']))

            acc[t, u] = test_res['acc_t']
            lss[t, u] = test_res['loss_t']

        # Save
        print()
        np.savetxt(os.path.join(args.checkpoint, args.output), acc, '%.6f')
        print('Saved accuracies at ' + os.path.join(args.checkpoint, args.output))

    # Extract embeddings to plot in tensorboard for miniimagenet
    if args.tsne == 'yes' and args.experiment == 'miniimagenet':
        appr.get_tsne_embeddings_first_ten_tasks(dataset, model=appr.load_model(t))
        appr.get_tsne_embeddings_last_three_tasks(dataset, model=appr.load_model(t))

    avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla, acc, lss, output_path=args.checkpoint, run_id=run_id)

    return avg_acc, gem_bwt
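
A minimal driver sketch for calling run (hypothetical; it assumes args has already been parsed, e.g. with argparse, and that one seed per repetition is wanted):

    # Repeat the experiment over several seeds and average the metrics.
    accs, bwts = [], []
    for run_id in range(3):
        args.seed = run_id
        avg_acc, gem_bwt = run(args, run_id)
        accs.append(avg_acc)
        bwts.append(gem_bwt)
    print('Mean ACC: {:.2f}%, mean BWT: {:.2f}'.format(
        np.mean(accs), np.mean(bwts)))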
Example #3
import os
from itertools import cycle

import numpy as np
import torch
# The snippet refers to torch.utils.data under several aliases
# (torchdata, data, and the fully qualified name):
import torch.utils.data as torchdata
from torch.utils import data
from torchvision import datasets as torchdatasets
from torchvision import transforms as torchtransforms
from torchvision.datasets import ImageFolder

import datagenerator  # project module: dataset generators
import fg_datasets    # project module: fine-grained datasets (CUB, cars, aircraft)
import network        # project module: model definitions
import utils          # project module: accuracy/BWT logging helpers

# `approach` (the continual-learning method class) and `GradLearner` (the
# gradient-learner network) are expected to be imported by the surrounding
# script.


def run(args, run_id):

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

        # Faster run but not deterministic:
        # torch.backends.cudnn.benchmark = True

        # For deterministic results matching the paper (at the cost of speed):
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Data loader
    print('Instantiate data generators and model...')
    dataloader = datagenerator.DatasetGen(args)
    args.taskcla, args.inputsize = dataloader.taskcla, dataloader.inputsize
    if args.experiment == 'multidatasets':
        args.lrs = dataloader.lrs

    # Model
    net = network.Net(args)
    net = net.to(args.device)

    net.print_model_size()

    grad_learner = GradLearner(args.gl_dim,
                               args.gl_arch,
                               gl_scale=args.gl_scale,
                               gl_loss_scale=args.gl_loss_scale)
    grad_learner = grad_learner.to(args.device)
    output_str = '====>Total learner params: {:.2f}M + {}'.format(
        sum(p.numel() for p in net.parameters()) / 1000000.0,
        sum(p.numel() for p in grad_learner.parameters()))
    print(output_str)

    # Approach
    appr = approach(net,
                    args,
                    network=network,
                    extradata=None,
                    gl_start_predict=args.gl_start_predict,
                    gl_prob=args.gl_prob,
                    extra_data_name=args.extra_data_name,
                    grad_learner=grad_learner)

    # Loop tasks
    acc = np.zeros((len(args.taskcla), len(args.taskcla)), dtype=np.float32)
    lss = np.zeros((len(args.taskcla), len(args.taskcla)), dtype=np.float32)

    for t, ncla in args.taskcla:

        x_in_size = args.extra_input_size
        extradata = None
        if 'tiny' in args.extra_data_name:
            extra_data_dir = os.path.join(args.extra_data, 'train')
            normalize = torchtransforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                  std=[0.229, 0.224, 0.225])
            extra_data_loader = torchdata.DataLoader(
                torchdatasets.ImageFolder(
                    extra_data_dir,
                    torchtransforms.Compose([
                        torchtransforms.RandomCrop((60, 60)),
                        torchtransforms.Resize(x_in_size),
                        torchtransforms.RandomHorizontalFlip(),
                        torchtransforms.ToTensor(),
                        normalize,
                    ])),
                batch_size=args.extra_batch,
                shuffle=True,
                num_workers=2)
            extradata = iter(cycle(extra_data_loader))
        elif 'coco' in args.extra_data_name:
            normalize = torchtransforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                  std=[0.229, 0.224, 0.225])
            extra_tfms = torchtransforms.Compose([
                torchtransforms.Resize(256),
                torchtransforms.RandomCrop(224),
                torchtransforms.Resize(x_in_size),
                torchtransforms.RandomHorizontalFlip(),
                torchtransforms.ToTensor(),
                normalize,
            ])
            extra_dataset = ImageFolder(args.extra_data, extra_tfms)
            extra_data_loader = torch.utils.data.DataLoader(
                extra_dataset,
                batch_size=args.extra_batch,
                shuffle=True,
                num_workers=2,
                pin_memory=True,
                drop_last=False)
            extradata = iter(cycle(extra_data_loader))
        elif 'cub' in args.extra_data_name:
            extraset = fg_datasets.CUB(input_size=x_in_size,
                                       root=args.extra_data,
                                       is_train=True)
            extra_data_loader = data.DataLoader(extraset,
                                                batch_size=args.extra_batch,
                                                shuffle=True,
                                                num_workers=2,
                                                drop_last=False)
            extradata = iter(cycle(extra_data_loader))
        elif 'car' in args.extra_data_name:
            extraset = fg_datasets.STANFORD_CAR(input_size=x_in_size,
                                                root=args.extra_data,
                                                is_train=True)
            extra_data_loader = data.DataLoader(extraset,
                                                batch_size=args.extra_batch,
                                                shuffle=True,
                                                num_workers=2,
                                                drop_last=False)
            extradata = iter(cycle(extra_data_loader))
        elif 'aircraft' in args.extra_data_name:
            extraset = fg_datasets.FGVC_aircraft(input_size=x_in_size,
                                                 root=args.extra_data,
                                                 is_train=True)
            extra_data_loader = data.DataLoader(extraset,
                                                batch_size=args.extra_batch,
                                                shuffle=True,
                                                num_workers=2,
                                                drop_last=False)
            extradata = iter(cycle(extra_data_loader))
        appr.extradata = extradata

        print('*' * 250)
        dataset = dataloader.get(t)
        print(' ' * 105, 'Dataset {:2d} ({:s})'.format(t + 1,
                                                       dataset[t]['name']))
        print('*' * 250)

        # Train
        trval_stats = appr.train(t, dataset[t])
        print('-' * 250)
        print()

        for u in range(t + 1):
            # Load previous model and replace the shared module with the current one
            test_model = appr.load_model(u)
            test_res = appr.test(dataset[u]['test'], u, model=test_model)

            print(
                '>>> Test on task {:2d} - {:15s}: loss={:.3f}, acc={:5.1f}% <<<'
                .format(u, dataset[u]['name'], test_res['loss_t'],
                        test_res['acc_t']))

            acc[t, u] = test_res['acc_t']
            lss[t, u] = test_res['loss_t']

        # Save
        print()
        np.savetxt(os.path.join(args.checkpoint, args.output), acc, '%.6f')
        print('Saved accuracies at ' +
              os.path.join(args.checkpoint, args.output))

    # Extract embeddings to plot in tensorboard for miniimagenet
    if args.tsne == 'yes' and args.experiment == 'miniimagenet':
        appr.get_tsne_embeddings_first_ten_tasks(dataset,
                                                 model=appr.load_model(t))
        appr.get_tsne_embeddings_last_three_tasks(dataset,
                                                  model=appr.load_model(t))

    avg_acc, gem_bwt = utils.print_log_acc_bwt(args.taskcla,
                                               acc,
                                               lss,
                                               output_path=args.checkpoint,
                                               run_id=run_id)

    return avg_acc, gem_bwt
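
One caveat about the extra-data iterators above: itertools.cycle buffers every item from its first pass, so iter(cycle(extra_data_loader)) keeps a full epoch of batches in memory and replays the exact same (already-augmented) batches on every later pass (the outer iter() is redundant, since cycle already returns an iterator). If fresh shuffling and augmentation are wanted on every pass, a small re-iterating generator is a common substitute. A minimal sketch (this is not what the snippet above does):

    def repeat_loader(loader):
        """Endlessly re-iterate a DataLoader, re-running its shuffling and
        augmentation pipeline on each pass (unlike itertools.cycle, which
        replays cached batches)."""
        while True:
            for batch in loader:
                yield batch

    # Drop-in replacement for iter(cycle(extra_data_loader)):
    # extradata = repeat_loader(extra_data_loader)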