def load_step_sfcn_preloaded(run,
                             task,
                             bin_min,
                             bin_max,
                             reinit_with_scaling=None):
    # Load the pre-trained SFCN for this run/task, reshape its output layer
    # to the requested bin range and optionally re-initialise the final
    # layers while preserving their scale.
    model = give_pretrained_sfcn(run, task)
    model.module.change_output_dim(bin_max - bin_min)
    if reinit_with_scaling is not None:
        model.module.reinit_final_layers_pres_scale(reinit_with_scaling)
    return model
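A minimal usage sketch for the function above; the run identifier, task name and bin range here are placeholders and only assume the give_pretrained_sfcn interface shown in these snippets.

# Hypothetical call: load run "0" pre-trained on age prediction and
# reshape its head to a new bin range (all values are placeholders).
model = load_step_sfcn_preloaded(run="0",
                                 task="age",
                                 bin_min=40,
                                 bin_max=96,
                                 reinit_with_scaling=None)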
Example 2
def load_final_sfcn_preloaded(run,
                              task,
                              bin_min,
                              bin_max,
                              reinit_with_scaling=None):
    # Same as above, but only reshape the final layer when the requested
    # bin range differs from the pre-trained output dimension.
    model = give_pretrained_sfcn(run, task)
    new_output_dim = bin_max - bin_min
    old_output_dim = model.module.output_dim

    if new_output_dim != old_output_dim:
        print("Output dimension is changed. Reshape final layer.")
        model.module.change_output_dim(new_output_dim)
    else:
        print("Output dimensions match. No reshaping of final layer.")

    if reinit_with_scaling is not None:
        model.module.reinit_final_layers_pres_scale(reinit_with_scaling)
    return model
Example 3
def test_sfcn_preloaded(test_loader, hps):
    # Load the pre-trained SFCN together with its bin range and freeze all weights.
    model, bin_min, bin_max = give_pretrained_sfcn(hps['run'], hps['task'], give_range=True)
    model.module.train_nothing()

    info_start = "Full model is being tested with weights loaded from pre-trained model."
    info_end = "Full model was tested with weights loaded from pre-trained model."

    return sfcn_test(model,
                     test_loader=test_loader,
                     bin_min=bin_min,
                     bin_max=bin_max,
                     space=hps['space'],
                     loss_met=hps['loss_met'],
                     eval_met=hps['eval_met'],
                     bin_step=hps['bin_step'],
                     sigma=hps['sigma'],
                     n_epochs=hps['n_epochs'],
                     print_corr=hps['print_corr'],
                     info_start=info_start,
                     info_end=info_end)
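For reference, a sketch of the hyper-parameter dictionary this function reads from. The keys are exactly the ones accessed above; the values are placeholders, not taken from the original configuration.

# Placeholder hps dictionary with the keys test_sfcn_preloaded expects.
hps = {'run': '0',          # identifier of the pre-trained run
       'task': 'age',       # task the SFCN was pre-trained on
       'space': 'bin',      # label space passed through to sfcn_test (placeholder)
       'loss_met': 'kl',    # loss metric (placeholder)
       'eval_met': 'mae',   # evaluation metric (placeholder)
       'bin_step': 1,       # width of the label bins
       'sigma': 1.,         # softening of the soft-label distribution
       'n_epochs': 1,       # number of test passes
       'print_corr': True}  # whether to print correlations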
Example 4
    # Optimise the input image by gradient ascent on the filter activation,
    # with an L2 decay term controlled by alpha.
    x = Variable(x, requires_grad=True)
    model.module.train_nothing()
    model.eval()
    loss_list = []
    for it in range(n_epochs):
        loss = compute_activation(model, x, filter_index)
        loss.backward()
        x.data += lr * x.grad.data - alpha * x.data
        x.grad.zero_()
        if it % print_every == 0:
            print("Epoch: %3d || Activ: %.6f" % (it, loss.item()))
        # Store the scalar value only, so no computation graph is kept alive.
        loss_list.append(loss.item())
    return (x, loss_list)
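The loop above relies on a helper compute_activation(model, x, filter_index) that is not shown. A minimal sketch of what such a helper might look like, assuming the SFCN exposes its convolutional trunk as model.module.feature_extractor, is:

def compute_activation(model, x, filter_index):
    # Assumed layout: forward pass through the convolutional trunk and
    # return the mean activation of the selected channel.
    features = model.module.feature_extractor(x)
    return features[:, filter_index].mean()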


# Load the pre-trained SFCN and move it to the GPU if one is available.
model = give_pretrained_sfcn("0", "age")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Number of GPUs: ", torch.cuda.device_count())
model = model.to(device)

_, train_loader = give_oasis_data('train',
                                  batch_size=1,
                                  num_workers=4,
                                  shuffle=True,
                                  debug='debug',
                                  preprocessing='min',
                                  task='age',
                                  share=1.)

# Number of ascent iterations and candidate learning rates for the sweep.
n_it = 100
lr_list = [0.1, 1., 10., 100., 0.1]
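A sketch of how these pieces could be tied together, assuming the gradient-ascent loop above is wrapped in a function called, say, maximize_activation with the free variables of that snippet as parameters; the wrapper name, the filter index, the alpha value and the assumption that the loader yields (image, label) batches are all hypothetical.

# Hypothetical driver: sweep the learning rates over one image from the loader.
x0, _ = next(iter(train_loader))
x0 = x0.to(device)
for lr in lr_list:
    x_opt, losses = maximize_activation(model, x0.clone(),
                                        filter_index=0,
                                        n_epochs=n_it,
                                        lr=lr,
                                        alpha=1e-4,
                                        print_every=10)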
Example 5
                                    num_workers=4,
                                    shuffle=True,
                                    debug=False,
                                    preprocessing='min',
                                    task=label,
                                    share=1.0,
                                    balance=False)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Number of GPUs: ", torch.cuda.device_count())

    for method in methods:
        for run in runs:
            for pretrained_task in pretrained_tasks:

                model = give_pretrained_sfcn(run, pretrained_task)
                model.eval()
                model.module.train_nothing()
                model = model.to(device)

                if ARGS['DEBUG'] == 'debug':
                    n_samples = 10
                elif ARGS['DEBUG'] == 'full':
                    n_samples = min(train_dataset._len, ARGS['N_SAMPLES'])
                else:
                    sys.exit("Unknown debug flag. Either 'debug' or 'full'.")

                # Get a random subset of indices of the dataset:
                size_dataset = train_dataset._len
                inds = torch.randperm(size_dataset)[:n_samples].tolist()
                n_final_channels = model.module.channel_number[-1]
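Under the assumption that the sampled indices are used to restrict the dataset, a minimal sketch with the standard PyTorch utilities would be:

from torch.utils.data import Subset, DataLoader

# Restrict the dataset to the randomly drawn indices and
# iterate over it one sample at a time.
subset_loader = DataLoader(Subset(train_dataset, inds),
                           batch_size=1,
                           shuffle=False)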