Example 1
prm.N_Way = 5  # Number of classes per task
prm.K_Shot_MetaTest = 100  # Number of samples per class in the meta-test tasks
prm.data_source = "CIFAR100"
prm.model_name = "CIFARNet"

learning_rate = 1e-3
loss_fn = nn.CrossEntropyLoss()

prm.run_name = "scratch_cifar100_lr_{}_{}_way_{}_shot_nte_{}_epoch_{}".format(learning_rate, prm.N_Way, prm.K_Shot_MetaTest, num_tasks, epoch_num)
prm.result_dir = os.path.join("scratch_log", prm.run_name)
os.makedirs(prm.result_dir, exist_ok=True)
#create_result_dir(prm)
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
write_to_log("Written by scratch.py at {}".format(time_str), prm, mode="w")

task_generator = Task_Generator(prm)
data_loaders = task_generator.create_meta_batch(prm, num_tasks, meta_split='meta_test')
assert len(data_loaders) == num_tasks

best_accs = []
best_losses = []

for i_task in range(num_tasks):
    # Re-initialize the network from scratch at the start of each task
    net = get_model(prm)
    net = net.cuda()
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

    d = data_loaders[i_task]
    train_loader = d['train']
    test_loader = d['test']
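    # --- Assumed continuation (sketch, not the original file's exact code) ---
    # Trains this task's model from scratch and tracks the best test accuracy /
    # loss across epochs, using only names defined above (net, optimizer,
    # loss_fn, epoch_num, train_loader, test_loader, best_accs, best_losses).
    best_acc, best_loss = 0.0, float('inf')
    for epoch in range(epoch_num):
        net.train()
        for inputs, targets in train_loader:
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            loss = loss_fn(net(inputs), targets)
            loss.backward()
            optimizer.step()

        # Evaluate on this task's test set after each epoch
        net.eval()
        correct, total, loss_sum = 0, 0, 0.0
        with torch.no_grad():
            for inputs, targets in test_loader:
                inputs, targets = inputs.cuda(), targets.cuda()
                outputs = net(inputs)
                loss_sum += loss_fn(outputs, targets).item() * targets.size(0)
                correct += (outputs.argmax(dim=1) == targets).sum().item()
                total += targets.size(0)
        acc = correct / total
        if acc > best_acc:
            best_acc, best_loss = acc, loss_sum / total

    best_accs.append(best_acc)
    best_losses.append(best_loss)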
Example 2
# Optionally specify which layers to keep trainable / which to freeze
if not_freeze_list:
    prm_freeze.not_freeze_list = not_freeze_list
if freeze_list:
    prm_freeze.freeze_list = freeze_list

# For the Bayesian experiment:
prm.log_var_init = {'mean': -10, 'std': 0.1} # The initial value for the log-var parameter (rho) of each weight
prm.n_MC = 1 # Number of Monte-Carlo iterations
prm.test_type = 'MaxPosterior' # 'MaxPosterior' / 'MajorityVote'
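# A minimal sketch (illustration only, not this project's actual layer code) of
# how log_var_init is typically consumed: each stochastic weight holds a mean and
# a log-variance parameter, and the log-variance is initialized from the dict above.
# Assumes torch / torch.nn are already imported (as `torch` and `nn`).
class StochasticLinearSketch(nn.Module):
    def __init__(self, in_dim, out_dim, log_var_init):
        super().__init__()
        self.w_mean = nn.Parameter(0.1 * torch.randn(out_dim, in_dim))
        self.w_log_var = nn.Parameter(
            torch.empty(out_dim, in_dim).normal_(log_var_init['mean'], log_var_init['std']))

    def forward(self, x):
        # Reparameterization trick: sample weights from N(mean, exp(log_var))
        std = torch.exp(0.5 * self.w_log_var)
        w = self.w_mean + std * torch.randn_like(std)
        return nn.functional.linear(x, w)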


# -------------------------------------------------------------------------------------------
#  Run experiments
# -------------------------------------------------------------------------------------------

task_generator = Task_Generator(prm)

# Test-error results of each method, accumulated over the n_experiments repetitions
test_err_orig = np.zeros(n_experiments)
test_err_scratch = np.zeros(n_experiments)
test_err_scratch_bayes = np.zeros(n_experiments)
test_err_transfer = np.zeros(n_experiments)
test_err_scratch_reg = np.zeros(n_experiments)
test_err_freeze = np.zeros(n_experiments)

for i in range(n_experiments):
    write_to_log('--- Experiment #{} out of {}'.format(i+1, n_experiments), prm)

    # Generate the task #1 data set:
    task1_data = task_generator.get_data_loader(prm)
    n_samples_orig = task1_data['n_train_samples']
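    # --- Assumed continuation (sketch, illustration only) ---
    # The arrays above collect test-error rates. A minimal helper showing how
    # such an error can be computed for a trained network on a task's test
    # loader (this is not the project's actual evaluation routine):
    def compute_test_error(model, test_loader):
        model.eval()
        n_correct, n_total = 0, 0
        with torch.no_grad():
            for inputs, targets in test_loader:
                inputs, targets = inputs.cuda(), targets.cuda()
                n_correct += (model(inputs).argmax(dim=1) == targets).sum().item()
                n_total += targets.size(0)
        return 1.0 - n_correct / n_total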
Example 3
# MPB algorithm parameters:
prm.kappa_prior = 2e3  # Parameter of the hyper-prior regularization
prm.kappa_post = 1e-3  # The std of the 'noise' added to the prior
prm.delta = 0.1  # Maximal probability that the bound does not hold

init_from_prior = True  # In meta-testing, initialize the posterior from the learned prior (True/False)

# Test type:
prm.test_type = 'MaxPosterior'  # 'MaxPosterior' / 'MajorityVote' / 'AvgVote'
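# Note (assumption about the semantics, based on the option names): 'MaxPosterior'
# evaluates with a single deterministic pass using the posterior means, while
# 'MajorityVote' / 'AvgVote' draw multiple Monte-Carlo weight samples and combine
# the sampled predictions by majority vote / averaged probabilities.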

# path to save the learned meta-parameters
save_path = os.path.join(prm.result_dir, 'model.pt')
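# Sketch of the assumed save/restore pattern (illustration; the project's exact
# calls may differ). A model object holding the learned prior could be persisted
# with torch.save and reloaded before meta-testing:
#   torch.save(prior_model.state_dict(), save_path)
#   prior_model.load_state_dict(torch.load(save_path))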

#set_trace()
task_generator = Task_Generator(prm)

# -------------------------------------------------------------------------------------------
#  Run Meta-Training
# -------------------------------------------------------------------------------------------

start_time = timeit.default_timer()

if prm.mode == 'MetaTrain':

    n_train_tasks = prm.n_train_tasks
    if n_train_tasks:
        # In this case we generate a finite set of training (observed) tasks before meta-training.
        # Generate the data sets of the training tasks:
        write_to_log('--- Generating {} training-tasks'.format(n_train_tasks), prm)
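        # Assumed continuation (sketch): build the data loaders of the finite
        # training-task set with the same generator API used in Example 1.
        train_data_loaders = task_generator.create_meta_batch(prm, n_train_tasks, meta_split='meta_train')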