Code example #1
File: uncert_eval.py  Project: kleinicke/cINN_on_OCO2
def compute_uncert(x, y, load=False, save_name="test"):
    params = prepare_data.x_to_params(x)

    print("Starts to sample posteriors for calibration errors")

    t = time.perf_counter()
    # Note: this uses the module-level y_all (the full evaluation set), not the
    # y argument. The network output is interleaved: even columns are the
    # predicted (whitened) values, odd columns are s = log(sigma^2).
    predictions = model.fn_func(y_all.to(c.device)).detach().cpu().numpy()
    x_pred = predictions[:, ::2]
    uncert_x_pred = np.sqrt(np.exp(predictions[:, 1::2]))
    params_pred = prepare_data.x_to_params(x_pred)
    uncert_params_pred = prepare_data.x_to_params(uncert_x_pred, no_mu=True)
    diff = params_pred - params
    print("mean denorm", np.mean(diff, axis=0))
    print("absmean denorm", np.mean(np.abs(diff), axis=0))
    print(f"sample and x to params time: {time.perf_counter() - t:.2f}s")
    calibration = diff / uncert_params_pred
    print("\n Predictions")
    print(predictions[:, 1])
    print(predictions[:, 1].max(), predictions[:, 1].min())
    print("\n Exp Predictions")
    print(np.exp(predictions[:, 1]))
    print(np.exp(predictions[:, 1]).max(), np.exp(predictions[:, 1]).min())

    return params, x_pred, uncert_x_pred, params_pred, uncert_params_pred, diff, calibration
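The returned calibration array divides each prediction error by the predicted standard deviation. If the uncertainties are well calibrated, these normalized errors behave like draws from N(0, 1), so roughly 68% should fall within +-1 and about 95% within +-2. A minimal sketch of that check (hypothetical helper, not part of the repository):

import numpy as np

def sigma_coverage(calibration):
    # fraction of normalized errors inside the 1- and 2-sigma bands, per parameter
    within_1sigma = np.mean(np.abs(calibration) < 1.0, axis=0)
    within_2sigma = np.mean(np.abs(calibration) < 2.0, axis=0)
    return within_1sigma, within_2sigma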
Code example #2
def show_prediction_changes():
    import help_train as ht
    torch.manual_seed(71)
    years = [2014, 2015, 2016, 2017, 2018]  # should match c.dc.viz_years used in the loop below
    param_gt_mean = []
    output_param_mean = []
    #batch_size = 512
    #for _, year in enumerate(years):
    for i, loadset in enumerate(dataloader.loader):
        year = c.dc.viz_years[i]
        #sets = dataloader.loadOCODataset(year = [year], analyze=True, noise=False)
        #loadset = dataloader.DataLoader(sets,
        #    batch_size=batch_size, shuffle=True, drop_last=True, num_workers = 1)
        x, y, _ = data_helpers.concatenate_set(loadset, 1000)
        outputs = ht.sample_posterior(y, x)
        # posterior mean over the sampled predictions for each observation
        output = torch.mean(torch.FloatTensor(outputs), dim=1)
        #print(output.size())
        #with torch.no_grad():
        #    output = feature_net.model.fn_func(y.to(c.device)).detach().cpu()

        #errors = ht.show_error(f"{year} eval", visualize=False)
        #errors.add_error(output,x)
        #errors.print_error()
        param_gt = prepare_data.x_to_params(x)
        output_param = prepare_data.x_to_params(output)
        param_gt_mean.append(np.mean(param_gt[:, 0]))
        output_param_mean.append(np.mean(output_param[:, 0]))
    plt.figure("Cmp_pred_true_INN")
    plt.plot(years, param_gt_mean, label="gt")
    plt.plot(years, output_param_mean, label="prediction_INN")
    plt.legend()
    plt.title("Comparison between predicted and true CO2 concentration")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")

    plt.figure("Offset_per_year_INN")
    plt.title("Offset per year")
    plt.plot(years,
             np.subtract(output_param_mean, param_gt_mean),
             label="offset")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()

    plt.figure("Increases_per_year_fn")
    plt.title("Increases per year")
    plt.plot(years[1:],
             np.diff(output_param_mean),
             label="increase prediction")
    plt.plot(years[1:],
             [j - i for i, j in zip(param_gt_mean[:-1], param_gt_mean[1:])],
             label="increase gt")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()

    plt.show(block=False)
Code example #3
File: new_net.py  Project: kleinicke/cINN_on_OCO2
def train_solver(epoch, N_epochs, model=model, viz=False):
    model.train()
    start_t = time.perf_counter()
    #raw_index = c.ana_names.index("xco2_raw")
    #errors = ht.show_error("train")#, mode = "train")
    for batch_idx, (x, y, ana) in enumerate(train_loader):
        
        y, x = y.to(c.device), x.to(c.device)

        optim.zero_grad()
        model.zero_grad()

        output = model.fn_func(y)
        #print(output.shape,x.shape)
        if c.predict_uncert:
            # Heteroscedastic Gaussian negative log-likelihood: the second
            # output channel s is used directly as the predicted standard deviation.
            xi = output[:, 0]
            s = output[:, 1]
            loss_a = (xi - x[:, 0]) ** 2 / (2 * s ** 2) + 0.5 * torch.log(s ** 2)
            loss = torch.mean(loss_a)
        else:
            # plain per-element squared error
            loss_a = (output - x) ** 2
            loss = torch.mean(loss_a)
        loss.backward()
        loss_a = torch.mean(loss_a, dim = 0)
        #print(loss_a)

        #errors.add_error(output,x)

        optim_step()

        #print trainloss
        if batch_idx % c.fn_pretrain_log_interval == 0:
            print(f'\rTrain Epoch: {epoch}/{N_epochs-1} [{batch_idx * len(x)}/{len(train_loader.dataset)}' 
                f'({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f} Time: {time.perf_counter() - start_t:.1f}', end='')
        
        ##assert not torch.isnan(torch.sum(output)), (loss_a,x,y)

        #print testloss
        tests_interval = int(len(train_loader.dataset) / len(x) / c.fn_pretrain_number_of_tests)
        if batch_idx % tests_interval == tests_interval - 1:
            print(f"\nTrain_loss {loss_a.detach().cpu().numpy()}")
            difference = prepare_data.x_to_params(output.detach().cpu().numpy()) - prepare_data.x_to_params(x.detach().cpu().numpy())
            tot_difference = np.mean(np.abs(difference), axis=0)
            rel_difference = np.mean(difference, axis=0)
            print(f"Train errors {tot_difference} and with mean at {rel_difference}")
            mean_er = torch.mean(torch.abs(output - x), dim=0)
            print("mean error train", mean_er.detach().cpu().numpy())
            #errors.print_error()
            break
    return loss.item()
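The uncertainty branch above minimizes a heteroscedastic Gaussian negative log-likelihood with the second output channel used directly as the standard deviation, while the evaluation code in examples #1 and #10 reads that channel as s = log(sigma^2) (sigma = sqrt(exp(s))). For reference, a minimal sketch of the same loss in the log-variance parameterization (hypothetical helper, not from the repository), dropping constants that do not depend on the network output:

import torch

def gaussian_nll(mean, log_var, target):
    # heteroscedastic Gaussian NLL with the uncertainty channel as log(sigma^2)
    return torch.mean(0.5 * (target - mean) ** 2 / torch.exp(log_var) + 0.5 * log_var)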
Code example #4
File: plot_data.py  Project: kleinicke/cINN_on_OCO2
def plot():
    #for i in range(len(params[0])):
    #    plot_quality_map(params[:,i],name = params_names[i])
    show_co2_change()
    params = prepare_data.x_to_params(x_all)
    specs = prepare_data.y_to_spectra(y_all)
    #plot_quality_map(params[:,co2_pos])
    show_param_hists(x_all, name="whitened")
    show_param_hists(params, name="orig")
    last_specs = specs[:, -20:]
    last_specs[:, 6] /= 12
    show_param_hists(last_specs,
                     name="spec-20",
                     n_parameters=20,
                     elem_names=prepare_data.spectra_names[-20:])
    show_param_hists(y_all[:, -20:],
                     name="y-20",
                     n_parameters=20,
                     elem_names=prepare_data.spectra_names[-20:])
    #show_elementcorrelations()
    #spectra_positions = [0,spec_length,2*spec_length]
    #for i in range(params_in_spectrum):
    #    spectra_positions.append(3*spec_length+i)

    #show_elementcorrelations(spectra[:,spectra_positions],len(prepare_data.spectra_names),"external params",params_names=prepare_data.spectra_names[:])
    #show_one_input_data()
    show_spectra()
Code example #5
File: nice_eval.py  Project: kleinicke/cINN_on_OCO2
def compute_calibration(x, y, load=False, save_name="test"):
    params = prepare_data.x_to_params(x)
    import time

    n_steps = 100
    q_values = []
    confidences = np.linspace(0., 1., n_steps + 1, endpoint=True)[1:]
    for conf in confidences:
        # store the lower and upper quantile bound for each confidence level
        q_low = 0.5 * (1 - conf)
        q_high = 0.5 * (1 + conf)
        q_values += [q_low, q_high]
    print("Starts to sample posteriors for calibration errors")

    t = time.perf_counter()
    posterior = sample_posterior(y)
    diff = torch.mean(torch.FloatTensor(posterior), dim=1) - x

    print("meandiff direct", torch.mean(diff, dim=0))
    print("absmeandiff direct", torch.mean(torch.abs(diff), dim=0))

    posterior = prepare_data.x_to_params(posterior)
    diff = np.mean(posterior, axis=1) - params
    print("mean denorm", np.mean(diff, axis=0))
    print("absmean denorm", np.mean(np.abs(diff), axis=0))
    diff3 = np.median(posterior, axis=1) - params
    print("meandiff denorm median", np.mean(diff3, axis=0))
    print("absmeandiff denorm median", np.mean(np.abs(diff3), axis=0))
    print(f"sample and x to params time: {time.perf_counter() - t:.2f}s")

    quantile = np.quantile(posterior, q_values, axis=1)

    uncert_intervals = quantile[1::2, :, :] - quantile[0::2, :, :]

    print(
        f"Total time to compute inlier matrix: {time.perf_counter() - t:.2f}s")

    inliers = np.logical_and(np.less(params, quantile[1::2, :, :]),
                             np.greater(params, quantile[0::2, :, :]))
    # index 68 corresponds to a confidence of 0.69, i.e. roughly the 1-sigma interval
    uncert_error_co2 = uncert_intervals[68, :, c.co2_pos] / 2
    uncert_error = uncert_intervals[68, :, :] / 2
    #Calculate error of INN
    #error_INN = (quantile[0,:,:]-gt_params)
    error_INN = (posterior.mean(axis=1) - params)
    return inliers, quantile, uncert_intervals, confidences, params, posterior, uncert_error_co2, uncert_error, error_INN
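compute_calibration returns, for every confidence level, a boolean inlier matrix marking whether the ground truth fell inside the corresponding posterior interval. For a calibrated posterior the empirical coverage should match the nominal confidence; a small sketch of that comparison (hypothetical helper, not part of the repository):

import numpy as np

def calibration_curve(inliers, confidences):
    # inliers: bool array of shape (n_confidences, n_samples, n_params)
    empirical = inliers.mean(axis=1)              # observed coverage per level and parameter
    calib_err = empirical - confidences[:, None]  # zero everywhere for a perfectly calibrated model
    return empirical, calib_err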
Code example #6
def show_error_correlations(x=x_all, y=y_all, load=False):

    #plot error co2 against: xco2, "albedo_o2","albedo_sco2","albedo_wco2", "tcwv" 4
    #and: "year","xco2_apriori","altitude","psurf","t700","longitude","latitude" 7
    #"snr_wco2","snr_sco2","snr_o2a","aod_bc","aod_dust","aod_ice","aod_oc","aod_seasalt","aod_sulfate","aod_total","aod_water"

    params = prepare_data.x_to_params(x)
    spectra = prepare_data.y_to_spectra(y)

    post = sample_posterior(y)
    diff = torch.mean(torch.FloatTensor(post), dim=1) - x
    #post_params =prepare_data.x_to_params(post)
    # Note: this expects a five-value return from compute_calibration, which
    # does not match the nine-value return of the nice_eval.py version above.
    _, _, uncert_intervals, _, post_params = compute_calibration(x,
                                                                 y,
                                                                 load=load)
    uncert_error_co2 = uncert_intervals[68, :, c.co2_pos] / 2
    diff = np.mean(post_params, axis=1) - params
    diff_co2 = diff[:, 0]
    error_name = ['error_correlations', 'estimated_error_correlations']
    for l, spectra in enumerate([spectra, np.array(y)]):
        if l == 1:
            params = np.array(x)
        for k, diff in enumerate([diff_co2, uncert_error_co2]):

            plt.figure(error_name[k] + f'_{l}', figsize=(20, 15))
            plt.title(error_name[k] + f'_{l}')
            print(diff.shape)
            horizontal_figures = 4
            vertical_figures = 6
            diff = np.clip(diff, -4, 4)
            for i in range(horizontal_figures):
                ax = plt.subplot(horizontal_figures, vertical_figures,
                                 vertical_figures * i + 1)

                bins = np.linspace(np.min(diff), np.max(diff), 100)

                plt.hist(diff,
                         bins=bins,
                         histtype='step',
                         color="lightskyblue",
                         orientation="horizontal")

                if i > 0:
                    #ax.axis('off')
                    ax.set_xticks([])
                plt.ylabel(f"error of prediction in ppm")
            """
Code example #7
def train_solver(epoch, N_epochs, model=model, vizual=False):
    model.train()
    model2.train()
    start_t = time.perf_counter()
    #raw_index = c.ana_names.index("xco2_raw")
    errors = ht.show_error("train")  #,loss_fnc= loss_sub, mode = "train")
    errors2 = ht.show_error(
        "trainuncert", uncertainty=True)  #,loss_fnc= loss_sub, mode = "train")
    for batch_idx, (x, y, ana) in enumerate(train_loader):

        y, x = y.to(c.device), x.to(c.device)

        optim.zero_grad()
        model.zero_grad()
        #print(y.shape)
        #print("y",y.shape)
        output = model.fn_func(y)
        #print(output.shape,x.shape)
        loss, loss_a = create_loss(output, x, loss_a_use=True)
        loss.backward()
        #print(loss_a)
        if c.train_uncert:
            output = output[:, ::2]
        ####errors.add_error(output,x)
        for i in range(output.shape[0]):
            #print(test_output[i],x_test[i])
            errors.add_error(output[i][None, :], x[i][None, :])

        optim_step()

        if two_nets:
            optim2.zero_grad()
            model2.zero_grad()
            output2 = model2.fn_func(y)
            #print(output2.shape,x.shape,y.shape)
            loss2, loss_a2 = create_loss(output2,
                                         x,
                                         loss_a_use=True,
                                         predict_uncert=True)
            loss2.backward()
            for i in range(output2.shape[0]):
                #print(test_output[i],x_test[i])
                #print(output2.shape)
                errors2.add_error(output2[i][None, ::2], x[i][None, :],
                                  output2[i][None, 1::2])
                #assert 0

            optim2.step()

        #print trainloss
        if batch_idx % c.fn_pretrain_log_interval == 0:
            if two_nets:
                print(
                    f'\rTrain Epoch: {epoch}/{N_epochs-1} [{batch_idx * len(x)}/{len(train_loader.dataset)}'
                    f'({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f},{loss2.item():.6f}, {torch.mean(loss_a2).item():.4f} Time: {time.perf_counter() - start_t:.1f}',
                    end='')
            else:
                print(
                    f'\rTrain Epoch: {epoch}/{N_epochs-1} [{batch_idx * len(x)}/{len(train_loader.dataset)}'
                    f'({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}, Time: {time.perf_counter() - start_t:.1f}',
                    end='')

        ##assert not torch.isnan(torch.sum(output)), (loss_a,x,y) #torch.mean(loss_a2).detach().cpu().numpy()

        #print testloss
        tests_interval = int(
            len(train_loader.dataset) / len(x) / c.fn_pretrain_number_of_tests)
        if batch_idx % tests_interval == tests_interval - 1:

            difference = prepare_data.x_to_params(
                output.detach().cpu().numpy()) - prepare_data.x_to_params(
                    x.detach().cpu().numpy())
            # difference2 repeats the same computation on `output`; it is never
            # used below and was presumably intended for `output2`.
            difference2 = prepare_data.x_to_params(
                output.detach().cpu().numpy()) - prepare_data.x_to_params(
                    x.detach().cpu().numpy())
            tot_difference = np.mean(np.abs(difference), axis=0)
            rel_difference = np.mean((difference), axis=0)
            print(
                f"Train errors {tot_difference} and with mean at {rel_difference}"
            )
            mean_er = torch.mean(torch.abs(output - x), dim=0)
            print("mean error train", mean_er.detach().cpu().numpy())
            errors.print_error()
            print("new")
            #difference = prepare_data.x_to_params(output.detach().cpu().numpy()) - prepare_data.x_to_params(x.detach().cpu().numpy())
            if two_nets:
                errors2.print_error()
                print(f"\nTrain_loss {loss_a.detach().cpu().numpy()}")
                print(
                    f"\nTrain_loss new {loss2.item():.6f}, {torch.mean(loss_a2).detach().cpu().numpy():.6f}, {loss_a2.detach().cpu().numpy()}"
                )
                viz.show_loss(loss2.item(), "trainuncert")
            break

    return loss.item()
Code example #8
File: plot_data.py  Project: kleinicke/cINN_on_OCO2
def show_co2_change():

    #def hist(x):
    #    results = []
    #    h, b = np.histogram(x[:, 0], bins=100, density=True)#range=(-2,2),
    #    h /= np.max(h)
    #    results = [b[:-1],h]
    #    return results

    torch.manual_seed(153)
    plt.figure(f"CO2 concentration", figsize=(10, 7), dpi=100)
    ax = plt.gca()
    means = []
    for _, year in enumerate(dc.viz_years):
        x, y = dataloader.year_sets[year]

        #for _, year in enumerate([2014, 2015, 2016, 2017, 2018]):

        #sets = dataloader.loadOCODataset(year = [year], analyze=True, noise=False)
        #loadset = dataloader.DataLoader(sets,
        #    batch_size=batch_size, shuffle=True, drop_last=True, num_workers = 1)
        #x,_,_ = data_helpers.concatenate_set(loadset,10000)
        params = prepare_data.x_to_params(x)

        #params_hist = hist(params)
        #print(params.min(axis=0))
        #print(params.max(axis=0))
        #plt.subplot(5, 5/5+1, j +1)
        #plt.step(*(params_hist), where='post', label = year)
        # ax._get_lines.prop_cycler is a private Matplotlib API that newer
        # Matplotlib releases no longer provide; `bw` is the old seaborn
        # keyword (replaced by bw_adjust / bw_method).
        color = next(ax._get_lines.prop_cycler)['color']
        #print(np.shape(params_hist))
        sns.set_style("whitegrid")
        sns.kdeplot(params[:, 0], bw=0.2, label=year, color=color)
        mean = np.mean(params[:, 0])
        means.append(mean)
        plt.plot([mean, mean], [0, 0.25], color=color)

    title = "The means of the years were:"
    for mean in means:
        title += f" {mean:.2f},"
    differences = ""
    prev_mean = means[0]
    for mean in means[1:]:
        differences += f"{mean-prev_mean:.2f}, "
        prev_mean = mean
    plt.title(title[:-2])
    plt.legend()
    #plt.tight_layout()
    plt.xlabel(f"CO2 in ppm, differences {differences[:-2]}")

    plt.figure(f"Mean corrected CO2 distribution", figsize=(10, 7), dpi=100)
    ax = plt.gca()
    means = []
    for _, year in enumerate(dc.viz_years):
        x, y = dataloader.year_sets[year]

        #for _, year in enumerate([2014, 2015, 2016, 2017, 2018]):

        #sets = dataloader.loadOCODataset(year = [year], analyze=True, noise=False)
        #loadset = dataloader.DataLoader(sets,
        #    batch_size=batch_size, shuffle=True, drop_last=True, num_workers = 1)
        #x,_,_ = data_helpers.concatenate_set(loadset,10000)
        params = prepare_data.x_to_params(x)

        #params_hist = hist(params)
        #print(params.min(axis=0))
        #print(params.max(axis=0))
        #plt.subplot(5, 5/5+1, j +1)
        #plt.step(*(params_hist), where='post', label = year)
        color = next(ax._get_lines.prop_cycler)['color']
        #print(np.shape(params_hist))
        sns.set_style("whitegrid")
        mean = np.mean(params[:, 0])
        sns.kdeplot(params[:, 0] - mean, bw=0.2, label=year, color=color)
        means.append(mean)
        #plt.plot([mean, mean], [0,0.25], color=color)

    title = "Mean corrected distribution:"
    for mean in means:
        title += f" {mean:.2f},"
    differences = ""
    prev_mean = means[0]
    for mean in means[1:]:
        differences += f"{mean-prev_mean:.2f}, "
        prev_mean = mean
    plt.title(title[:-2])
    plt.legend()
    #plt.tight_layout()
    plt.xlabel(f"Distribution shapes, 2014 and 2019 not complete years")
Code example #9
def show_prediction_changes(model, show_two=False):
    N_post = c.N_post
    z = torch.randn(N_post, c.x_dim).to(c.device)
    model.eval()

    def sample_posterior(y_it, z=z, N=N_post, model=model):
        """[summary]

        Arguments:
            y_it {[type]} -- [description]

        Keyword Arguments:
            z {[type]} -- [description] (default: {z})
            N {[type]} -- [description] (default: {N_post})

        Returns:
            [outputs] -- [sampled]
        """

        outputs = []
        print("samples_posterior", y_it.shape, z.shape)

        for i, y in enumerate(y_it):
            y = y.expand(N, -1)

            with torch.no_grad():
                outputs.append(model.reverse_sample(z, y).data.cpu().numpy())

        return outputs

    import help_train as ht
    three = True
    #import nice_eval as ne
    torch.manual_seed(71)
    years = c.dc.viz_years  #[2014, 2015, 2016, 2017, 2018]
    param_gt_mean = []
    param_gt_mean1 = []
    param_gt_mean2 = []
    param_gt_mean3 = []
    other_gt_mean = []
    output_param_mean = []
    other_param_mean = []
    #batch_size = 512
    #for _, year in enumerate(years):
    for i, year in enumerate(dataloader.dc.viz_years):
        x, y = dataloader.year_sets[year]
        # Note: Tensor.to is not in-place; these calls have no effect unless
        # the result is assigned (e.g. x = x.to(c.device)).
        x.to(c.device)
        y.to(c.device)
        print(f"Creates prediction for year {c.dc.viz_years[i]}\n\n\n\n")
        #year = c.dc.viz_years[i]
        #sets = dataloader.loadOCODataset(year = [year], analyze=True, noise=False)
        #loadset = dataloader.DataLoader(sets,
        #    batch_size=batch_size, shuffle=True, drop_last=True, num_workers = 1)
        #x,y,_ = data_helpers.concatenate_set(loadset,1000)'
        #print(i,year_set)
        #x,y = year_set
        print(x.shape, y.shape)
        outputs = sample_posterior(y)
        output = torch.mean(torch.FloatTensor(outputs), dim=1)
        #print(output.size())
        #with torch.no_grad():
        #    output = feature_net.model.fn_func(y.to(c.device)).detach().cpu()

        #errors = ht.show_error(f"{year} eval", visualize=False)
        #errors.add_error(output,x)
        #errors.print_error()
        param_gt = prepare_data.x_to_params(x)
        output_param = prepare_data.x_to_params(output)
        param_gt_mean.append(np.mean(param_gt[:, 0]))
        if three:
            param_gt_mean1.append(np.mean(param_gt[:, 0]))
            param_gt_mean2.append(np.mean(param_gt[:, 1]))
            param_gt_mean3.append(np.mean(param_gt[:, 2]))

        output_param_mean.append(np.mean(output_param[:, 0]))
        if show_two:
            other_gt_mean.append(np.mean(param_gt[:, 1]))
            other_param_mean.append(np.mean(output_param[:, 1]))

    param_gt_mean1 = np.array(param_gt_mean1)
    param_gt_mean2 = np.array(param_gt_mean2)
    param_gt_mean3 = np.array(param_gt_mean3)

    plt.figure("Cmp_pred_true_INN")
    plt.plot(years, param_gt_mean, label="gt")
    plt.plot(years, output_param_mean, label="prediction_INN")
    plt.legend()
    plt.title("Comparison between predicted and true CO2 concentration")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")

    plt.figure("Offset_per_year_INN")
    plt.title("Offset per year")
    plt.plot(years,
             np.subtract(output_param_mean, param_gt_mean),
             label="offset")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()

    plt.figure("Increases_per_year_fn")
    plt.title("Increases per year")
    plt.plot(years[1:],
             np.diff(output_param_mean),
             label="increase prediction")
    plt.plot(years[1:],
             [j - i for i, j in zip(param_gt_mean[:-1], param_gt_mean[1:])],
             label="increase gt")
    plt.xlabel("year")
    plt.ylabel("CO2 in ppm")
    plt.legend()

    plt.figure("nice_yoy", figsize=(14, 5))
    ax = plt.subplot(1, 2, 1)
    plt.plot(years, param_gt_mean, label="True value apriori")
    plt.plot(years, output_param_mean, label="Prediction apriori")
    if show_two:
        plt.plot(years, other_gt_mean, label="True value CO2")
        #plt.plot(years, other_param_mean, label = "Prediction co2")
    plt.legend(fontsize=18)
    plt.title("Comparison between predicted and true mean CO2 concentrations")
    plt.xlabel("year", fontsize=18)
    plt.ylabel("mean CO2 concentration in ppm", fontsize=18)
    ax = plt.subplot(1, 2, 2)
    plt.title("Differences from true mean", fontsize=18)
    plt.plot(years,
             np.subtract(output_param_mean, param_gt_mean),
             label="apriori")
    if show_two:
        plt.plot(years,
                 np.subtract(other_param_mean, other_gt_mean),
                 label="Measured")
    plt.xlabel("year", fontsize=18)
    plt.ylabel("difference of mean CO2 in ppm", fontsize=18)
    if show_two:
        plt.legend()
    plt.tight_layout()
    if three:
        plt.figure("raw_aprioi_xco2", figsize=(14, 5))
        ax = plt.subplot(1, 2, 1)
        plt.title("Mean Concentrations", fontsize=18)
        plt.plot(years, param_gt_mean1, label="A Priori CO2")
        plt.plot(years, param_gt_mean2, label="Raw CO2")
        plt.plot(years, param_gt_mean3, label="Correct CO2")
        plt.xlabel("year", fontsize=18)
        plt.ylabel("CO2 in ppm", fontsize=18)
        plt.legend(fontsize=18)

        ax = plt.subplot(1, 2, 2)
        plt.title("Differences in Concentrations", fontsize=18)
        plt.plot(years,
                 param_gt_mean1 - param_gt_mean2,
                 label="A Priori - Raw")
        plt.plot(years, param_gt_mean3 - param_gt_mean2, label="Correct - Raw")
        plt.plot(years,
                 param_gt_mean3 - param_gt_mean1,
                 label="Correct - A Priori")
        plt.xlabel("year", fontsize=18)
        plt.ylabel("CO2 in ppm", fontsize=18)
        plt.legend(fontsize=18)
Code example #10
File: evaluate.py  Project: kleinicke/cINN_on_OCO2
def uncert(model2=model, name="CO2", position=position):
    x2 = model2.fn_func(y_all.to(c.device)).detach().cpu().numpy()
    # even columns: predicted (whitened) values, odd columns: s = log(sigma^2)
    params2 = prepare_data.x_to_params(x2[:, ::2])[:, 0]
    params_gt = prepare_data.x_to_params(x_all)[:, 0]
    errors2 = params2 - params_gt
    uncert2 = prepare_data.x_to_params(np.sqrt(np.exp(x2[:, 1::2])),
                                       no_mu=True)[:, 0]
    # normalized error: ~N(0, 1) if the predicted uncertainties are calibrated
    calib2 = errors2 / uncert2

    plt.figure(f"Nice_uncerts_{name}", figsize=(15, 5))
    #error_INN=np.clip(error_INN,-4,4)

    plot_borders = max(np.max(errors2), -np.min(errors2))  #4
    bins = np.linspace(-plot_borders, plot_borders, 100)
    #error_ret=np.clip(error_ret ,-3,3)

    #Error distribution
    ax = plt.subplot(1, 2, 1)
    (mu_INN, sigma_INN) = norm.fit(errors2)

    errors2_clip = np.clip(errors2, -plot_borders, plot_borders)
    uncert2_clip = np.clip(uncert2, -plot_borders, plot_borders)
    ax.set_title(
        rf"Error distribution, $\mu$={mu_INN:.2f}, $\sigma$={sigma_INN:.2f}",
        fontsize=20)

    #plt.hist(error_ret, bins=bins, density=False, histtype='step',color="blue",label="retrival")
    plt.hist(errors2_clip,
             bins=bins,
             density=False,
             histtype='step',
             color="blue",
             label="cINN")
    plt.hist(uncert2_clip,
             bins=bins,
             density=False,
             histtype='step',
             label="uncert")

    plt.xlabel("Estimated difference to gt in ppm", fontsize=20)
    plt.ylabel("Number of estimations", fontsize=20)

    ax = plt.subplot(1, 2, 2)
    bins = np.linspace(-4.5, 4.5, 100)
    #rel_error_iNN=np.clip(rel_error_iNN,-4,4)

    #plt.hist(error_ret/dataloader.ret_params[:c.evaluation_samples,1], density=True, bins=bins, histtype='step',color="blue",label="retrival")
    iNN_bins, _, _ = plt.hist(calib2,
                              bins=bins,
                              histtype='step',
                              density=True,
                              color="blue",
                              label="cINN")

    (mu_INN, sigma_iNN) = norm.fit(calib2)
    y_best = scipy.stats.norm.pdf(bins, 0, 1)
    xi2_iNN = 0  #np.sum(np.square(y_best-iNN_bins)/y_best)
    print(xi2_iNN)
    y_INN = scipy.stats.norm.pdf(bins, mu_INN, sigma_iNN)

    l = plt.plot(bins,
                 y_best,
                 color="black",
                 linestyle="--",
                 linewidth=2,
                 label="optimal gaussian")
    l = plt.plot(bins, y_INN, 'b--', linewidth=2, label="gaussian fit on INN")
    #l = plt.plot(bins, y_ret, 'r--', linewidth=2)
    ax.set_title(
        f"mu_INN:{mu_INN:.2f}, sigma_iNN: {sigma_iNN:.2f}",
        fontsize=20)  #mu_ret: {mu_ret:.2f}, sigma_ret: {sigma_ret:.2f},
    c.mu.append(mu_INN)
    c.sigma.append(sigma_iNN)
    plt.legend()
    plt.xlabel("Difference to gt, depending on estimated error", fontsize=20)
    plt.ylabel("Prob. density", fontsize=20)
    plt.tight_layout()
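The xi2_iNN value printed above is hard-coded to zero; the commented-out expression next to it suggests a chi-square style distance between the histogram of normalized errors and the ideal standard normal density. A hedged sketch of that comparison, evaluated at the bin centers so the array shapes match (hypothetical helper, not from the repository):

import numpy as np
import scipy.stats

def chi2_to_standard_normal(hist_density, bin_edges):
    # hist_density: the values returned by plt.hist(..., density=True)
    centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])
    ideal = scipy.stats.norm.pdf(centers, 0.0, 1.0)
    return np.sum((ideal - hist_density) ** 2 / ideal)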
Code example #11
File: evaluate.py  Project: kleinicke/cINN_on_OCO2
def show_feature_net_solution():
    print("show_feature_net_solution")
    orig_prior_hists = hists(prepare_data.x_to_params(x_all))

    x = model.fn_func(y_all.to(c.device)).detach().cpu().numpy()
    #print("co2_results",x[:100,0])
    #print("true_results",x_all[:100,0])

    #y_test = y_all.numpy()+prepare_data.mu_y
    #print(y_all[:10,:10], y_all[:10,-10:])
    y_gt = y_all  #[:n_plots]
    x_gt = x_all
    orig_x_gt = prepare_data.x_to_params(x_gt)
    orig_y_gt = prepare_data.y_to_spectra(y_gt)
    #print(x.shape)
    orig_x = prepare_data.x_to_params(x)
    #print(np.shape(orig_x), np.shape(orig_x_gt))
    #print("\n")
    #plot_world.plot_quality_map((np.abs(orig_x-orig_x_gt)[:,0]),(y_all.detach().cpu().numpy()+prepare_data.mu_y)[:,-2:], "Featurenet prediction")
    plot_helpers.plot_quality_map((np.abs(orig_x - orig_x_gt)[:, 0]), position,
                                  "Error of network prediction")

    #print(x)
    #print(x_gt)
    print("shapes:", orig_x.shape, orig_x_gt.shape)
    show_error_stats(orig_x[:, 0] - orig_x_gt[:, 0], show_nice=True)

    print("show_predicted_error")

    x_uncert = model.fn_func(y_all.to(c.device)).detach().cpu().numpy()[:, 1]
    # convert s to sigma (sigma = sqrt(exp(s))), following the paper
    x_uncert = np.sqrt(np.exp(x_uncert))

    # undo the whitening scale of the first (CO2) parameter to express the
    # uncertainty in the original units (ppm)
    uncert = x_uncert * np.linalg.inv(prepare_data.w_x)[0, 0]

    plot_helpers.plot_quality_map(uncert, position, "Predicted Uncertainty")

    #show_error_stats(uncert, "uncertainty")

    #show_error_stats((orig_x[:,0]-orig_x_gt[:,0])/uncert, "normalized")

    plot_helpers.plot_quality_map(
        np.abs((orig_x[:, 0] - orig_x_gt[:, 0]) / uncert), position,
        "Uncertainty quality")

    #orig_x_gt = orig_x_gt[:n_plots]
    #x_gt = x_gt[:n_plots]
    for i in range(n_plots):
        #print(x_gt[0])
        #print("\n",prepare_datax_to_params(x_gt)[0])
        #print(prepare_datax_to_params(x)[0])
        #print(x[0])

        plt.figure(f"orig_{i}", figsize=(20, 15))
        for j in range(n_x):
            plt.subplot(3, n_x // 4 + 1, j + 1)
            if j == 0:
                plt.step(*(orig_prior_hists[j]),
                         where='post',
                         color='grey',
                         label="prior")
                plt.plot([orig_x_gt[i, j], orig_x_gt[i, j]], [0, 1],
                         color='red',
                         label="ground truth")
                plt.plot([orig_x[i, j], orig_x[i, j]], [0, 1],
                         color='blue',
                         label="predicted value")
                plt.legend()
            else:
                plt.step(*(orig_prior_hists[j]), where='post', color='grey')
                #plt.step(*(hist_i[j+offset]), where='post', color='blue')

                #x_low, x_high = np.percentile(orig_posteriors[i][:,j+offset], [q_low, q_high])
                plt.plot([orig_x_gt[i, j], orig_x_gt[i, j]], [0, 1],
                         color='red')
                plt.plot([orig_x[i, j], orig_x[i, j]], [0, 1], color='blue')
            #plt.plot([orig_y_gt [i,j+offset-18], orig_y_gt[i,j+offset-18]], [0,1], color='orange',alpha=0.5) #is on top of red
            #if j+offset == 14:
            #    x_low=dataloader.ret_params[i,0]-dataloader.ret_params[i,1]
            #    x_high=dataloader.ret_params[i,0]+dataloader.ret_params[i,1]
            #    plt.plot([x_low, x_low], [0,1], color='green')
            #    plt.plot([x_high, x_high], [0,1], color='green')
            plt.xlabel(f"{c.param_names[j]}")
Code example #12
File: nice_eval.py  Project: kleinicke/cINN_on_OCO2
def show_posterior_histograms(params,
                              posteriors,
                              n_plots=n_plots,
                              orig_prior_hists=hists(
                                  prepare_data.x_to_params(x_all))):

    confidence = 0.68
    q_low = 100. * 0.5 * (1 - confidence)
    q_high = 100. * 0.5 * (1 + confidence)

    for i in range(n_plots):

        hist_i = hists(posteriors[i])

        show_orig_number = n_plots - 10
        if i < show_orig_number:
            plt.figure(f"orig_{i}", figsize=(15, 7))
            for j in range(n_x):
                ax = plt.subplot((n_x + 2) // 3, 3, j + 1)
                plt.step(*(orig_prior_hists[j]),
                         where='post',
                         color='grey',
                         label="gt dist. of other samples")
                plt.step(*(hist_i[j]),
                         where='post',
                         color='blue',
                         label="sampled predictions")

                x_low, x_high = np.percentile(posteriors[i][:, j],
                                              [q_low, q_high])

                if j == c.co2_pos:
                    raw_index = prepare_data.ana_names.index("xco2_raw")
                    unc_index = prepare_data.ana_names.index(
                        "xco2_uncertainty")
                    ap_index = prepare_data.ana_names.index("xco2_apriori")

                    if x_low < 100:
                        raw_xco2 = ana_all[i, raw_index] - ana_all[i, ap_index]
                    else:
                        raw_xco2 = ana_all[i, raw_index]
                    #x_low=params[i,j]-ana_all[i,unc_index]
                    #x_high=params[i,j]+ana_all[i,unc_index]
                    ret_uncert = ana_all[i, unc_index]
                    ret_bins = np.linspace(params[i, j] - 3 * ret_uncert,
                                           params[i, j] + 3 * ret_uncert, 100)
                    y_best = scipy.stats.norm.pdf(ret_bins, params[i, j],
                                                  ret_uncert)

                    #scipy.stats.norm.pdf()
                    plt.plot(ret_bins,
                             y_best / y_best.max(),
                             color='brown',
                             label="uncertainty of gt")

                plt.plot([x_low, x_low], [0, 1], color='green')
                plt.plot([x_high, x_high], [0, 1],
                         color='green',
                         label="1 sigma range")
                plt.plot([params[i, j], params[i, j]], [0, 1],
                         color='red',
                         linewidth=2,
                         label="ground truth")
                plt.plot([], [], color='brown', label="uncertainty of gt")

                unit = ""
                if c.param_names[j] == "xco2":
                    unit = " in ppm"
                if c.param_names[j] == "tcwv":
                    unit = r" in kg $m^{-2}$"
                #plt.xlabel(rf"{c.param_names[j]}{unit}")
                ax.set_title(rf"{c.param_names[j]}{unit}", fontsize=17)
            plt.legend(fontsize=13)
            plt.tight_layout()