Example #1
def testINN(i_epoch):
    test_loss = []
    # Sanity check: only the INN is switched to eval mode for testing; the
    # feature net keeps whatever mode it is currently in.
    print("FN_model_train_mode before test call", nets.model.training)
    print("INN_model_train_mode before test call", model.training)
    model.eval()
    print("FN_model_train_mode during test call", nets.model.training)
    print("INN_model_train_mode during test call", model.training)
    print("\n\n\n")
    for x_test, y_test, ana in c.test_ana_loader:
        x_test, y_test = x_test.to(c.device), y_test.to(c.device)
        with torch.no_grad():
            output, jac = model.forward(x_test, y_test)

            # Negative log-likelihood of the latent code under a standard normal
            # prior, corrected by the log-determinant of the forward Jacobian.
            zz = torch.sum(output**2, dim=1)
            neg_log_likeli = 0.5 * zz - jac
            test_loss.append(torch.mean(neg_log_likeli).item())

    test_loss = np.mean(np.array(test_loss), axis=0)
    # Visualize posterior samples for the last test batch.
    ht.sample_posterior(y_test, x_test, name="test")

    model.train()
    print(f"Test Loss {i_epoch}: {test_loss}")
    print("\nTest loss_train: ",test_loss, f"{time.perf_counter() - t:.2f}s")
    
    viz.show_loss(test_loss,"test")
Example #2
def show_prediction_changes():
    import help_train as ht
    torch.manual_seed(71)
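    # The hard-coded years are assumed to match c.dc.viz_years, which indexes
    # the per-year loaders iterated below.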
    years = [2014, 2015, 2016, 2017, 2018]
    param_gt_mean = []
    output_param_mean = []
    # dataloader.loader is assumed to hold one loader per year, ordered to
    # match c.dc.viz_years.
    for i, loadset in enumerate(dataloader.loader):
        year = c.dc.viz_years[i]
        x, y, _ = data_helpers.concatenate_set(loadset, 1000)
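        # Sample from the learned posterior p(x | y); averaging over the
        # sample dimension (dim=1) yields one point estimate per observation.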
        outputs = ht.sample_posterior(y, x)
        output = torch.mean(torch.FloatTensor(outputs), dim=1)

        #errors = ht.show_error(f"{year} eval", visualize=False)
        #errors.add_error(output,x)
        #errors.print_error()
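        # Convert network-space vectors back to physical parameters; column 0
        # holds the CO2 concentration in ppm (cf. the plot labels below).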
        param_gt = prepare_data.x_to_params(x)
        output_param = prepare_data.x_to_params(output)
        param_gt_mean.append(np.mean(param_gt[:, 0]))
        output_param_mean.append(np.mean(output_param[:, 0]))
    plt.figure("Cmp_pred_true_INN")
    plt.plot(years, param_gt_mean, label="gt")
    plt.plot(years, output_param_mean, label="prediction_INN")
    plt.legend()
    plt.title("Comparison between predicted and true CO2 concentration")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")

    plt.figure("Offset_per_year_INN")
    plt.title("Offset per year")
    plt.plot(years,
             np.subtract(output_param_mean, param_gt_mean),
             label="offset")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()

    plt.figure("Increases_per_year_fn")
    plt.title("Increases per year")
    plt.plot(years[1:],
             np.diff(output_param_mean),
             label="increase prediction")
    plt.plot(years[1:],
             np.diff(param_gt_mean),
             label="increase gt")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()

    plt.show(block=False)
Example #3
def trainINN(i_epoch):
    loss_history = []
    data_iter = iter(c.train_ana_loader)

    if i_epoch < 0:
        for param_group in model.optimizer.param_groups:
            param_group['lr'] = c.lr_init * c.lr_reduce_factor
    if i_epoch == 0:
        for param_group in model.optimizer.param_groups:
            param_group['lr'] = c.lr_init

    #print("FN_model_train_mode before train call",nets.model.training)
    print("INN_model_train_mode before train call",model.training)
    for param_group in model.optimizer.param_groups:
            print(f"Start Learningrate for epoch {i_epoch} is {param_group['lr']:.3e}")
    
    print(f"Learningrate for epoch {i_epoch} is {model.scheduler.get_lr()[0]:.3e}")            

    iterator = tqdm.tqdm(enumerate(data_iter),
                        total=min(len(c.train_ana_loader), c.n_its_per_epoch),
                        leave=False,
                        mininterval=1.,
                        disable=(not c.progress_bar),
                        ncols=83)



    model.train()
    for i_batch, (x, cond, _) in iterator:
        cond, x = cond.to(c.device), x.to(c.device)
        model.optimizer.zero_grad()
        if c.do_rev:
            # This branch hasn't been updated for a while; don't expect it to work.
            def sample_outputs(sigma, out_shape, batchsize=4):
                return [sigma * torch.cuda.FloatTensor(torch.Size((batchsize, o))).normal_() for o in out_shape]

            z = sample_outputs(1., model.output_dimensions, c.batch_size)
            features = nets.model.features(cond)
            output = model.model(z, features, rev=True)

            #x_gen = model.combined_model.module.reverse_sample(z, cond.cuda())
            jac = model.model.log_jacobian(run_forward=False)
            l = 3.5 * torch.mean((x - output)**2) - torch.mean(jac)  # /tot_output_size
        else:
            # Default case: maximum-likelihood training with the same NLL as in testINN.
            z, jac = model.forward(x, cond)
            zz = torch.sum(z**2, dim=1)
            neg_log_likeli = 0.5 * zz - jac
            l = torch.mean(neg_log_likeli)  # / tot_output_size

        l.backward()

        model.optim_step()
        loss_history.append([l.item()])
        assert not np.isnan(l.item()), f"\n loss_history {loss_history}"
        if i_batch+1 >= c.n_its_per_epoch:
            # somehow the data loader workers don't shut down automatically
            try:
                data_iter._shutdown_workers()
            except:
                pass

            iterator.close()
            break
    print(loss_history)
    
    ht.sample_posterior(cond,x, "train")

    epoch_losses = np.mean(np.array(loss_history), axis=0)

    print("Train loss",epoch_losses[0])
    print(epoch_losses.shape)
    print(epoch_losses)

    assert not np.isnan(np.sum(epoch_losses)), loss_history
    viz.show_loss(epoch_losses[0], "train")