# Ejemplo n.º 1 (Example 1)
# 0
    def __init__(self, tru_distribution: MatrixParameterDistribution):
        """Build a problem definition around the true matrix distribution.

        Args:
            tru_distribution: distribution of the true system matrix; must
                provide get_dimension(), convert(), convert_auxiliary(), and
                a matrix_parameters attribute.
        """
        self.tru_distribution = tru_distribution
        # Default right-hand side: standard normal vector matching the
        # system dimension (callers may overwrite b_distribution later).
        self.b_distribution = aug.VectorDistributionFromLambda(
            lambda: np.random.randn(self.tru_distribution.get_dimension()))

        self.tru_mat = self.tru_distribution.convert(self.tru_distribution.matrix_parameters)
        self.tru_aux = self.tru_distribution.convert_auxiliary(self.tru_distribution.matrix_parameters)
        # BUG FIX: these are *squared* norms (x^T A x and x^T x). The
        # original wrapped both dot products in np.sqrt, which contradicted
        # the attribute names and the squared-error accumulation used in the
        # run_test functions elsewhere in this file (plain np.dot, no sqrt).
        self.energy_norm_squared = lambda x: np.dot(x, self.tru_mat.apply(x))
        self.l2_norm_squared = lambda x: np.dot(x, x)
def main():
    """Run the 2D grid Laplacian operator-augmentation diagnostic suite.

    Builds a true Laplacian distribution on an n x n grid, registers a set
    of diagnostic runs (naive plus several augmentation variants), executes
    them, prints the results, and pickles them to dgn_grid_laplacian2d.pkl.
    """
    n = 128
    h = 1.0 / (n - 1)
    std_dev = 0.5
    true_a = np.ones((n, n))

    # Interior grid coordinates (boundary nodes excluded).
    xs = np.arange(1, n - 1) * h
    ys = np.arange(1, n - 1) * h
    grid_x, grid_y = np.meshgrid(xs, ys)
    # Deterministic smooth right-hand side evaluated on the interior grid.
    b_distribution = lambda: (np.cos(2.0 * np.pi * grid_x) * np.sin(
        4.0 * np.pi * grid_y)).flatten()

    params = GridLaplacianParameters2D(true_a)
    hyperparams = GridLaplacianHyperparameters2D(std_dev, h)
    true_mat_dist = GridLaplacianDistribution2D(params, hyperparams)
    problem_def = dgn.ProblemDefinition(true_mat_dist)
    problem_def.b_distribution = aug.VectorDistributionFromLambda(
        b_distribution)  # Set distribution of rhs
    diagnostics = dgn.DiagnosticRun(problem_def)

    def add_run(run, num_sub_runs=100, samples_per_sub_run=100):
        # Helper that replaces the copy-pasted configuration of every run.
        run.num_sub_runs = num_sub_runs
        run.samples_per_sub_run = samples_per_sub_run
        diagnostics.add_run(run)

    # Naive run. Configured directly (not via add_run) because the original
    # code never set samples_per_sub_run on it; its sub-run count matches
    # the 100 x 100 total sample budget of the augmented runs.
    naive_run = dgn.NaiveProblemRun(problem_def)
    naive_run.num_sub_runs = 100 * 100
    diagnostics.add_run(naive_run)

    # Basic semi-Bayesian operator augmentation.
    add_run(dgn.AugProblemRun(problem_def))

    # Energy-norm semi-Bayesian operator augmentation.
    add_run(dgn.EnAugProblemRun(problem_def))

    # Truncated energy-norm semi-Bayesian augmentation, orders 2 and 4.
    add_run(dgn.TruncEnAugProblemRun(problem_def, 2))
    add_run(dgn.TruncEnAugProblemRun(problem_def, 4))

    # Accelerated truncated energy-norm augmentation, orders 2, 4 and 6.
    add_run(dgn.TruncEnAugAccelProblemRun(problem_def, 2))
    add_run(dgn.TruncEnAugAccelProblemRun(problem_def, 4))
    add_run(dgn.TruncEnAugAccelProblemRun(problem_def, 6))

    # Hard-window truncated energy-norm augmentation, orders 2 and 4.
    add_run(dgn.TruncEnAugProblemRun(problem_def, 2, window_funcs='hard'))
    add_run(dgn.TruncEnAugProblemRun(problem_def, 4, window_funcs='hard'))

    # Run all diagnostics and print results.
    diagnostics.run()
    print()
    diagnostics.print_results()
    # BUG FIX: the original passed an open() handle straight to pickle.dump
    # and never closed it; use a context manager so the file is flushed and
    # closed deterministically.
    with open('dgn_grid_laplacian2d.pkl', 'wb') as f:
        pickle.dump(diagnostics.results, f)
# Ejemplo n.º 3 (Example 3)
# 0
def run_test(tru_graph, runs, samples_per_run, boundary, variance):
    """Compare naive, augmented, and energy-augmented solves on a noisy graph Laplacian.

    Args:
        tru_graph: the true graph whose Laplacian defines the linear system.
        runs: number of independent Monte Carlo runs.
        samples_per_run: bootstrap samples used by each augmented solve.
        boundary: vertex indices excluded from the system (boundary nodes).
        variance: noise variance handed to NoisyLaplacianDistribution.

    Prints, for each estimator, the mean squared error in the L2 and energy
    norms together with a two-standard-error half width.
    """
    # Interior vertices = all vertices minus the boundary set.
    interior = list(set(range(0, len(tru_graph))).difference(boundary))
    interior_dimension = len(interior)

    # Let the right hand side be a complete random realization
    b_dist = lambda: np.random.randn(interior_dimension)
    q_u_dist = aug.IdenticalVectorPairDistributionFromLambda(b_dist)
    q_dist = aug.VectorDistributionFromLambda(b_dist)

    tru_dist = NoisyLaplacianDistribution(tru_graph, interior, variance)
    tru_mat = tru_dist.convert_to_matrix(tru_graph)

    errs_l2_naive_squared = []
    errs_l2_aug_squared = []
    errs_l2_en_aug_squared = []
    errs_energy_naive_squared = []
    errs_energy_aug_squared = []
    errs_energy_en_aug_squared = []

    for run in range(0, runs):

        print(f'Run {run+1} / {runs}...')

        b = b_dist()
        # Draw a noisy observation of the graph and bootstrap from it.
        noisy_graph = tru_dist.draw_graph_sample()
        bootstrap_dist = NoisyLaplacianDistribution(noisy_graph, interior, variance)
        noisy_mat = bootstrap_dist.convert_to_matrix(noisy_graph)

        op_Ahat_inv = lambda rhs: spla.spsolve(noisy_mat, rhs)
        op_Ahat = lambda rhs: noisy_mat @ rhs

        tru_solution = spla.spsolve(tru_mat, b)
        naive_solution = op_Ahat_inv(b)

        # Perform simple augmentation using a scaled identity prior on b
        aug_solution = aug.aug(samples_per_run, 1, b,
                               op_Ahat_inv, bootstrap_dist, q_u_dist)
        # Perform energy-augmentation using a scaled identity prior on b
        en_aug_solution = aug.en_aug(samples_per_run, 1, b,
                                     op_Ahat_inv, op_Ahat, bootstrap_dist, q_dist)

        # Accumulate squared errors in the L2 norm (e^T e) and the energy
        # norm (e^T A e) against the true solution.
        err_naive = naive_solution - tru_solution
        errs_l2_naive_squared.append(np.dot(err_naive, err_naive))
        errs_energy_naive_squared.append(np.dot(err_naive, tru_mat @ err_naive))

        err_aug = aug_solution - tru_solution
        errs_l2_aug_squared.append(np.dot(err_aug, err_aug))
        errs_energy_aug_squared.append(np.dot(err_aug, tru_mat @ err_aug))

        err_en_aug = en_aug_solution - tru_solution
        errs_l2_en_aug_squared.append(np.dot(err_en_aug, err_en_aug))
        errs_energy_en_aug_squared.append(np.dot(err_en_aug, tru_mat @ err_en_aug))

    errs_l2_naive_squared = np.array(errs_l2_naive_squared)
    errs_l2_aug_squared = np.array(errs_l2_aug_squared)
    errs_l2_en_aug_squared = np.array(errs_l2_en_aug_squared)

    errs_energy_naive_squared = np.array(errs_energy_naive_squared)
    errs_energy_aug_squared = np.array(errs_energy_aug_squared)
    errs_energy_en_aug_squared = np.array(errs_energy_en_aug_squared)

    sqrt_runs = np.sqrt(runs)

    def report(label, errs_squared):
        # Mean squared error with a two-standard-error half width; output
        # format is identical to the original six print statements.
        print(f'{label}: {np.mean(errs_squared)} +- '
              f'{2 * np.std(errs_squared) / sqrt_runs}')

    report('Average naive L2 error squared', errs_l2_naive_squared)
    report('Average augmented L2 error squared', errs_l2_aug_squared)
    report('Average energy-augmented L2 error squared', errs_l2_en_aug_squared)
    print('')
    report('Average naive energy error squared', errs_energy_naive_squared)
    report('Average augmented energy error squared', errs_energy_aug_squared)
    report('Average energy-augmented energy error squared', errs_energy_en_aug_squared)
def run_test(tru_a, runs, samples_per_run, std_dev):
    """Compare naive, augmented, and energy-augmented solves on a noisy grid Laplacian.

    NOTE(review): this module defines a second function named run_test with a
    different signature; as written, the later definition shadows the earlier
    one at import time — confirm each example is meant to run standalone.

    Args:
        tru_a: true coefficient array; tru_a.shape[0] sets the grid size n.
        runs: number of independent Monte Carlo runs.
        samples_per_run: bootstrap samples used by each augmented solve.
        std_dev: standard deviation handed to GridLaplacianDistribution.

    Prints, for each estimator, the mean squared error in the L2 and energy
    norms together with a two-standard-error half width.
    """
    n = tru_a.shape[0]

    # Fixed smooth right-hand side evaluated on the grid points.
    xs = np.arange(1, n) / n
    b = np.cos(2.0 * np.pi * xs)

    # Unlike the randomized variant, the rhs "distribution" here is
    # deterministic: it always returns the same vector b.
    b_dist = lambda: b
    q_u_dist = aug.IdenticalVectorPairDistributionFromLambda(b_dist)
    q_dist = aug.VectorDistributionFromLambda(b_dist)

    tru_dist = GridLaplacianDistribution(tru_a, std_dev)
    tru_mat = GridLaplacianMatrixSample(tru_dist.convert_to_matrix(tru_a))

    errs_l2_naive_squared = []
    errs_l2_aug_squared = []
    errs_l2_en_aug_squared = []
    errs_energy_naive_squared = []
    errs_energy_aug_squared = []
    errs_energy_en_aug_squared = []

    for run in range(0, runs):

        print(f'Run {run+1} / {runs}...')

        b = b_dist()
        # Draw a noisy coefficient sample and bootstrap from it.
        noisy_a = tru_dist.draw_a_sample()
        bootstrap_dist = GridLaplacianDistribution(noisy_a, std_dev)
        noisy_mat = GridLaplacianMatrixSample(bootstrap_dist.convert_to_matrix(noisy_a))

        op_Ahat_inv = lambda rhs: noisy_mat.solve(rhs)
        op_Ahat = lambda rhs: noisy_mat.apply(rhs)

        tru_solution = tru_mat.solve(b)
        naive_solution = op_Ahat_inv(b)

        # Perform simple augmentation using a scaled identity prior on b
        aug_solution = aug.aug(samples_per_run, 1, b,
                               op_Ahat_inv, bootstrap_dist, q_u_dist)
        # Perform energy-augmentation using a scaled identity prior on b
        en_aug_solution = aug.en_aug(samples_per_run, 1, b,
                                     op_Ahat_inv, op_Ahat, bootstrap_dist, q_dist)

        # Accumulate squared errors in the L2 norm (e^T e) and the energy
        # norm (e^T A e) against the true solution.
        err_naive = naive_solution - tru_solution
        errs_l2_naive_squared.append(np.dot(err_naive, err_naive))
        errs_energy_naive_squared.append(np.dot(err_naive, tru_mat.apply(err_naive)))

        err_aug = aug_solution - tru_solution
        errs_l2_aug_squared.append(np.dot(err_aug, err_aug))
        errs_energy_aug_squared.append(np.dot(err_aug, tru_mat.apply(err_aug)))

        err_en_aug = en_aug_solution - tru_solution
        errs_l2_en_aug_squared.append(np.dot(err_en_aug, err_en_aug))
        errs_energy_en_aug_squared.append(np.dot(err_en_aug, tru_mat.apply(err_en_aug)))

    errs_l2_naive_squared = np.array(errs_l2_naive_squared)
    errs_l2_aug_squared = np.array(errs_l2_aug_squared)
    errs_l2_en_aug_squared = np.array(errs_l2_en_aug_squared)

    errs_energy_naive_squared = np.array(errs_energy_naive_squared)
    errs_energy_aug_squared = np.array(errs_energy_aug_squared)
    errs_energy_en_aug_squared = np.array(errs_energy_en_aug_squared)

    sqrt_runs = np.sqrt(runs)

    def report(label, errs_squared):
        # Mean squared error with a two-standard-error half width; output
        # format is identical to the original six print statements.
        print(f'{label}: {np.mean(errs_squared)} +- '
              f'{2 * np.std(errs_squared) / sqrt_runs}')

    report('Average naive L2 error squared', errs_l2_naive_squared)
    report('Average augmented L2 error squared', errs_l2_aug_squared)
    report('Average energy-augmented L2 error squared', errs_l2_en_aug_squared)
    print('')
    report('Average naive energy error squared', errs_energy_naive_squared)
    report('Average augmented energy error squared', errs_energy_aug_squared)
    report('Average energy-augmented energy error squared', errs_energy_en_aug_squared)