Example #1
def main(in_args):
    """
    Main entrypoint for example-generation
    """
    args = parse_args(in_args)
    setup_logging(args.loglevel)
    np.random.seed(args.seed)
    example = args.example.lower()
    num_trials = args.num_trials
    fsize = args.fsize
    linewidth = args.linewidth
    seed = args.seed
    inputdim = args.input_dim
    save = args.save
    alt = args.alt
    bayes = args.bayes
    sample_dist = args.sample_dist
    dist = args.dist
    loc = args.loc
    scale = args.scale
    sample_tol = args.tolerance
    ratio_meas = args.ratio_measure
    sensor_prec = args.precision
    num_measure = args.num_measure

    tolerances = list(np.sort([float(t) for t in sensor_prec]))

    if example == "pde":
        measurements = list(np.sort([int(n) for n in num_measure]))
        if len(measurements) == 0:
            measurements = [100]
    else:
        time_ratios = list(np.sort([float(r) for r in ratio_meas]))
        if len(time_ratios) == 0:
            time_ratios = [1.0]

    _logger.info("Running...")
    if example == "pde":
        lam_true = -3.0
        res = main_pde(
            num_trials=num_trials,
            fsize=fsize,
            seed=seed,
            lam_true=lam_true,
            tolerances=tolerances,
            input_dim=inputdim,
            alt=alt,
            bayes=bayes,
            dist=dist,
            sample_dist=sample_dist,
            sample_tol=sample_tol,
            measurements=measurements,
            loc=loc,
            scale=scale,
        )

        if inputdim == 1:  # TODO: roll this plotting into main_pde, handle w/o fenics?
            plot_scalar_poisson_summary(
                res=res,
                measurements=measurements,
                fsize=fsize,
                prefix=f"figures/pde_{inputdim}D/" + example,
                lam_true=lam_true,
                save=save,
            )
        else:
            # solution / sensors plotted by main_pde method
            pass

        if len(measurements) > 1:
            plot_experiment_measurements(res,
                                         example,
                                         fsize,
                                         linewidth,
                                         save=save)

        if len(tolerances) > 1:
            plot_experiment_equipment(tolerances,
                                      res,
                                      example,
                                      fsize,
                                      linewidth,
                                      save=save)

    elif example == "ode":
        lam_true = 0.5
        res = main_ode(
            num_trials=num_trials,
            fsize=fsize,
            seed=seed,
            lam_true=lam_true,
            tolerances=tolerances,
            alt=alt,
            bayes=bayes,
            time_ratios=time_ratios,
        )

        if len(time_ratios) > 1:
            plot_experiment_measurements(res,
                                         "ode/" + example,
                                         fsize,
                                         linewidth,
                                         save=save,
                                         legend=True)

        if len(tolerances) > 1:
            plot_experiment_equipment(
                tolerances,
                res,
                "ode/" + example,
                fsize,
                linewidth,
                title=f"Variance of MUD Error\nfor t={1 + 2 * np.median(time_ratios):1.3f}s",
                save=save,
            )

    elif example in ["linear", "lin"]:
        print("Running Linear Examples.")
        main_lin(in_args)

    elif example in ["monomial", "mon"]:
        print("Running BIP vs SIP Comparison (1D).")
        main_monomial(in_args)
    else:
        raise ValueError("Unsupported example requested.")

    if args.save and example in ("pde", "ode"):
        # only the pde/ode branches return a `res` object to pickle
        with open("results.pkl", "wb") as f:
            pickle.dump(res, f)
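
# Usage sketch (an assumption, not shown in the module above): when args.save is
# truthy, the pde/ode results are pickled to results.pkl; a later reload for
# post-processing might look like this.
import pickle

with open("results.pkl", "rb") as f:
    reloaded_res = pickle.load(f)
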
Example #2
def main_meas_var(args):
    """
    Main entrypoint for High-Dim Linear Measurement Example
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    np.random.seed(args.seed)
    #     example       = args.example
    #     num_trials   = args.num_trials
    #     fsize        = args.fsize
    #     linewidth    = args.linewidth
    #     seed         = args.seed
    # dim_input     = args.input_dim
    # save         = args.save
    #     alt          = args.alt
    #     bayes        = args.bayes
    #     prefix       = args.prefix
    #     dist         = args.dist

    presentation = False
    save = True

    if not presentation:
        plt.rcParams["mathtext.fontset"] = "stix"
        plt.rcParams["font.family"] = "STIXGeneral"
    fdir = "figures/lin"
    check_dir(fdir)

    fsize = 42

    def numnonzero(x, tol=1e-4):
        # counts the entries of x that are within tol of zero
        return len(x[abs(x) < tol])

    # # Impact of Number of Measurements for Various Choices of $\Sigma_\text{init}$

    # dim_output = dim_input
    dim_input, dim_output = 4, 2
    # seed = 12
    # np.random.seed(seed)

    # initial_cov = np.diag(np.sort(np.random.rand(dim_input))[::-1] + 0.5)
    initial_cov = np.eye(dim_input)

    plt.figure(figsize=(10, 10))
    # initial_mean = np.zeros(dim_input).reshape(-1, 1)
    # initial_mean = np.random.randn(dim_input).reshape(-1,1)
    # num_obs_list = np.arange(1, 101).tolist()

    lam_ref = np.random.randn(dim_input).reshape(-1, 1)

    prefix = "lin-meas-cov"

    # Ns = [10, 50, 100, 500, 1000]

    sigma = 1e-1
    # np.random.seed(21)
    Ns = np.arange(10, 2001, 50).tolist()
    # Ns = [10, 50, 100, 500, 1000, 5000, 10000]

    num_trials = 50
    # for _ in range(num_trials):

    def A_N(M, N, sigma):
        A = np.sqrt(N) / sigma * M
        return A

    def d_N(M, lam, n):
        d = M @ lam + n
        assert len(d.ravel()) == len(
            n.ravel()
        ), f"Shape mismatch noise={n.shape}, data={d.shape}"
        return d

    def b_N(N, d, sigma):
        b = -1 / np.sqrt(N) * np.sum(np.divide(d, sigma), axis=1)
        return b

    # M = np.random.normal(size=(dim_output, dim_input))
    operator_list, data_list, _ = models.createRandomLinearProblem(
        lam_ref,
        dim_output,
        [max(Ns)] * dim_output,  # iterate over an increasing number of measurements
        [0] * dim_output,  # noiseless data; noise is drawn separately for each trial
        dist="norm",
        repeated=True,
    )

    # operator list has dim_output 1xdim_input matrices
    MUD = np.zeros((dim_input, len(Ns), num_trials))
    # M = np.array(operator_list).reshape(dim_output, dim_input)
    noise_draw = [
        np.random.randn(dim_output, max(Ns)) * sigma for _ in range(num_trials)
    ]
    for j, N in enumerate(Ns):
        # _A = A_N(M, N, sigma)
        for i in range(num_trials):
            # _b = b_N(N, d_N(M, lam_ref, noise_draw[i][:, 0:N]), sigma)
            # A, b = transform_linear_setup(operator_list, data_list, sigma)
            A, b, _ = transform_measurements(
                operator_list, data_list, N, sigma, noise_draw[i]
            )
            MUD[:, j, i] = mud_sol(A, b, cov=initial_cov)

    mud_var = MUD.var(axis=2).mean(axis=0)
    plt.plot(Ns, mud_var, label="MUD", c="k", lw=10)

    # plt.title("Precision of MUD Estimates", fontsize=1.25 * fsize)
    plt.yscale("log")
    plt.xscale("log")
    plt.ylabel("Mean Variance of MUD Estimates", fontsize=fsize * 1.25)
    plt.xlabel("Number of Measurements", fontsize=fsize)
    plt.legend()
    # plt.legend(['MUD', 'Least Squares'], fontsize=fsize)
    if save:
        plt.savefig(f"{fdir}/{prefix}-convergence.png", bbox_inches="tight")
        plt.close("all")
    else:
        plt.show()
        plt.close("all")
Example #3
def main_meas(args):
    """
    Main entrypoint for High-Dim Linear Measurement Example
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    np.random.seed(args.seed)
    #     example       = args.example
    #     num_trials   = args.num_trials
    #     fsize        = args.fsize
    #     linewidth    = args.linewidth
    #     seed         = args.seed
    # dim_input     = args.input_dim
    # save         = args.save
    #     alt          = args.alt
    #     bayes        = args.bayes
    #     prefix       = args.prefix
    #     dist         = args.dist

    presentation = False
    save = True

    if not presentation:
        plt.rcParams["mathtext.fontset"] = "stix"
        plt.rcParams["font.family"] = "STIXGeneral"
    fdir = "figures/lin"
    check_dir(fdir)

    fsize = 42

    def numnonzero(x, tol=1e-4):
        # counts the entries of x that are within tol of zero
        return len(x[abs(x) < tol])

    # # Impact of Number of Measurements for Various Choices of $\Sigma_\text{init}$

    # dim_output = dim_input
    dim_input, dim_output = 20, 5
    # seed = 12
    # np.random.seed(seed)

    initial_cov = np.diag(np.sort(np.random.rand(dim_input))[::-1] + 0.5)
    # initial_cov = np.eye(dim_input)  # will cause spectrum of updated covariance to contain repeated eigenvalues

    plt.figure(figsize=(10, 10))
    # initial_mean = np.zeros(dim_input).reshape(-1, 1)
    # initial_mean = np.random.randn(dim_input).reshape(-1,1)
    # num_obs_list = np.arange(1, 101).tolist()

    lam_ref = np.random.randn(dim_input).reshape(-1, 1)

    prefix = "lin-meas-cov"

    # Ns = [10, 50, 100, 500, 1000]

    sigma = 1e-1
    # np.random.seed(21)
    # Ns = np.arange(10, 2001, 50).tolist()
    Ns = [10, 100, 1000, 10000]

    # for _ in range(num_trials):

    operator_list, data_list, _ = models.createRandomLinearProblem(
        lam_ref,
        dim_output,
        [max(Ns)] * dim_output,  # iterate over an increasing number of measurements
        [0] * dim_output,  # noiseless data; noise is drawn separately below
        dist="norm",
        repeated=True,
    )

    MUD = np.zeros((dim_input, len(Ns)))
    UP = np.zeros((dim_input, len(Ns)))
    noise_draw = np.random.randn(dim_output, max(Ns)) * sigma

    for j, N in enumerate(Ns):
        A, b, _ = transform_measurements(operator_list, data_list, N, sigma, noise_draw)
        MUD[:, j] = mud_sol(A, b, cov=initial_cov)
        up_cov = updated_cov(A, initial_cov)
        up_sdvals = sp.linalg.svdvals(up_cov)
        # print(up_sdvals.shape, dim_input, up_cov.shape)
        UP[:, j] = up_sdvals

    # mud_var = MUD.var(axis=2)
    lines = ["solid", "dashed", "dashdot", "dotted"]

    for p in range(dim_input):
        plt.plot(
            Ns,
            UP[p, :],
            label=f"SV {p}",
            alpha=0.4,
            lw=5,
            ls=lines[p % len(lines)],
        )

    # plt.plot(Ns, mud_var, label='MUD', c='k', lw=10)
    # plt.title("Precision of MUD Estimates", fontsize=1.25 * fsize)
    plt.yscale("log")
    plt.xscale("log")
    plt.ylabel("Eigenvalues of $\\Sigma_{up}$", fontsize=fsize * 1.25)
    plt.xlabel("Number of Measurements", fontsize=fsize)
    # plt.legend()
    if save:
        plt.savefig(f"{fdir}/{prefix}-convergence.png", bbox_inches="tight")
        plt.close("all")
    else:
        plt.show()
        plt.close("all")

    fig, ax = plt.subplots(figsize=(15, 10))
    ax.set_yscale("log")
    index_values = np.arange(dim_input) + 1

    for i, N in enumerate(Ns):
        ax.scatter(
            index_values,
            UP[:, i],
            marker="o",
            s=200,
            facecolors="none",
            edgecolors="k",
        )

        ax.plot(
            index_values,
            UP[:, i],
            label=f"$N={N:1.0E}$",
            alpha=1,
            lw=3,
            ls=lines[i % len(lines)],
            c="k",
        )
    ax.set_xticks(index_values)
    ax.set_xticklabels(ax.get_xticks(), rotation=0)
    ax.set_xlabel("Index", fontsize=fsize)
    ax.set_ylabel("Eigenvalue", fontsize=fsize)

    ax.xaxis.set_major_formatter(FormatStrFormatter("%2d"))
    ax.legend(loc="lower left", fontsize=fsize * 0.75)

    if save:
        plt.savefig(f"{fdir}/{prefix}-sd-convergence.png", bbox_inches="tight")
        plt.close("all")
    else:
        plt.show()
        plt.close("all")
Example #4
def main_dim(args):
    """
    Main entrypoint for High-Dim Linear Dimension Example
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    np.random.seed(args.seed)
    #     example       = args.example
    #     num_trials   = args.num_trials
    #     fsize        = args.fsize
    #     linewidth    = args.linewidth
    #     seed         = args.seed
    # dim_input     = args.input_dim
    #     save         = args.save
    #     alt          = args.alt
    #     bayes        = args.bayes
    #     prefix       = args.prefix
    #     dist         = args.dist

    presentation = False
    save = True

    if not presentation:
        plt.rcParams["mathtext.fontset"] = "stix"
        plt.rcParams["font.family"] = "STIXGeneral"
    fdir = "figures/lin"
    check_dir(fdir)

    fsize = 42

    def numnonzero(x, tol=1e-4):
        # counts the entries of x that are within tol of zero
        return len(x[abs(x) < tol])

    # # Impact of Dimension for Various Choices of $\Sigma_\text{init}$

    # dim_output = dim_input
    dim_input, dim_output = 100, 100
    seed = 12
    np.random.seed(seed)

    # from sklearn.datasets import make_spd_matrix as make_spd
    # from sklearn.datasets import make_sparse_spd_matrix as make_cov
    # cov = np.eye(dim_input)
    initial_cov = np.diag(np.sort(np.random.rand(dim_input))[::-1] + 0.5)

    plt.figure(figsize=(10, 10))
    initial_mean = np.zeros(dim_input).reshape(-1, 1)
    # initial_mean = np.random.randn(dim_input).reshape(-1,1)
    randA = models.randA_gauss  # choose which variety of generating map
    A, b = models.randP(dim_input, randA=randA)
    prefix = "lin-dim-cov"
    alpha_list = [10 ** (n) for n in np.linspace(-3, 4, 8)]

    # option to fix A and perturb lam_ref

    lam_ref = np.random.randn(dim_input).reshape(-1, 1)
    # d = A @ lam_ref + b

    # %%time
    sols = compare_linear_sols_dim(lam_ref, A, b, alpha_list, initial_mean, initial_cov)

    # c = np.linalg.cond(A)*np.linalg.norm(lam_ref)
    c = np.linalg.norm(lam_ref)
    # c = 1
    err_mud_list = [
        [np.linalg.norm(_m[0] - lam_ref) / c for _m in sols[alpha]]
        for alpha in alpha_list
    ]
    err_map_list = [
        [np.linalg.norm(_m[1] - lam_ref) / c for _m in sols[alpha]]
        for alpha in alpha_list
    ]
    err_pin_list = [
        [np.linalg.norm(_m[2] - lam_ref) / c for _m in sols[alpha]]
        for alpha in alpha_list
    ]

    # c = np.linalg.cond(A)
    c = np.linalg.norm(A)
    # err_Amud_list = [[np.linalg.norm(A @ (_m[0] - lam_ref)) / c for _m in sols[alpha]] for alpha in alpha_list]
    # err_Amap_list = [[np.linalg.norm(A @ (_m[1] - lam_ref)) / c for _m in sols[alpha]] for alpha in alpha_list]
    # err_Apin_list = [[np.linalg.norm(A @ (_m[2] - lam_ref)) / c for _m in sols[alpha]] for alpha in alpha_list]

    # measure # of components that agree
    # err_mud_list = [[numnonzero(_m[0] - lam_ref) for _m in sols[alpha]] for alpha in alpha_list]
    # err_map_list = [[numnonzero(_m[1] - lam_ref) for _m in sols[alpha]] for alpha in alpha_list]
    # err_pin_list = [[numnonzero(_m[2] - lam_ref) for _m in sols[alpha]] for alpha in alpha_list]

    x, y = np.arange(1, dim_output, 1), err_mud_list[0][0:-1]

    slope, intercept = (
        np.linalg.pinv(np.vander(x, 2)) @ np.array(y).reshape(-1, 1)
    ).ravel()
    regression = slope * x + intercept

    # ---

    # # Convergence Plot

    for idx, alpha in enumerate(alpha_list):
        if (1 + idx) % 2 and alpha <= 10:
            plt.annotate(
                f"$\\alpha$={alpha:1.2E}",
                (100, max(err_map_list[idx][-1], 0.01)),
                fontsize=24,
            )
        _err_mud = err_mud_list[idx]
        _err_map = err_map_list[idx]
        _err_pin = err_pin_list[idx]

        plt.plot(x, _err_mud[:-1], label="MUD", c="k", lw=10)
        plt.plot(x, _err_map[:-1], label="MAP", c="r", ls="--", lw=5)
        plt.plot(x, _err_pin[:-1], label="LSQ", c="xkcd:light blue", ls="-", lw=5)

    # plt.plot(x, regression, c='g', ls='-')
    # plt.xlim(0,dim_output)
    if "id" in prefix:
        plt.title(
            "Convergence for Various $\\Sigma_{init} = \\alpha I$",
            fontsize=1.25 * fsize,
        )
    else:
        plt.title(
            "Convergence for Various $\\Sigma_{init} = \\alpha \\Sigma$",
            fontsize=1.25 * fsize,
        )
        # plt.yscale('log')
    # plt.yscale('log')
    # plt.xscale('log')
    plt.ylim(0, 1.0)
    # plt.ylim(1E-4, 5E-2)
    # plt.ylabel("$\\frac{||\\lambda^\\dagger - \\lambda||}{||\\lambda^\\dagger||}$", fontsize=fsize*1.25)
    plt.ylabel("Relative Error", fontsize=fsize * 1.25)
    plt.xlabel("Dimension of Output Space", fontsize=fsize)
    plt.legend(["MUD", "MAP", "Least Squares"], fontsize=fsize)
    # plt.annotate(f'Slope={slope:1.4f}', (4,4/7), fontsize=32)
    plt.savefig(f"{fdir}/{prefix}-convergence.png", bbox_inches="tight")
    plt.close("all")
Example #5
def main_contours(args):
    """
    Main entrypoint for 2D Linear Rank-Deficient Example (Contour Plots)
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    np.random.seed(args.seed)
    #     example       = args.example
    #     num_trials   = args.num_trials
    #     fsize        = args.fsize
    #     linewidth    = args.linewidth
    #     seed         = args.seed
    #     inputdim     = args.input_dim
    #     save         = args.save
    #     alt          = args.alt
    #     bayes        = args.bayes
    #     prefix       = args.prefix
    #     dist         = args.dist

    presentation = False
    save = True

    if not presentation:
        plt.rcParams["mathtext.fontset"] = "stix"
        plt.rcParams["font.family"] = "STIXGeneral"
    fdir = "figures/contours"
    check_dir(fdir)
    lam_true = np.array([0.7, 0.3])
    initial_mean = np.array([0.25, 0.25])
    A = np.array([[1, 1]])
    b = np.zeros((1, 1))

    experiments = {}

    # data mismatch
    experiments["data_mismatch"] = {}
    experiments["data_mismatch"]["out_file"] = f"{fdir}/data_mismatch_contour.png"
    experiments["data_mismatch"]["data_check"] = True
    experiments["data_mismatch"]["full_check"] = False
    experiments["data_mismatch"]["tk_reg"] = 0
    experiments["data_mismatch"]["pr_reg"] = 0

    # Tikhonov regularization
    experiments["tikonov"] = {}
    experiments["tikonov"]["out_file"] = f"{fdir}/tikonov_contour.png"
    experiments["tikonov"]["tk_reg"] = 1
    experiments["tikonov"]["pr_reg"] = 0
    experiments["tikonov"]["data_check"] = False
    experiments["tikonov"]["full_check"] = False

    # modified regularization
    experiments["modified"] = {}
    experiments["modified"]["out_file"] = f"{fdir}/consistent_contour.png"
    experiments["modified"]["tk_reg"] = 1
    experiments["modified"]["pr_reg"] = 1
    experiments["modified"]["data_check"] = False
    experiments["modified"]["full_check"] = False

    # MAP point
    experiments["classical"] = {}
    experiments["classical"]["out_file"] = f"{fdir}/classical_solution.png"
    experiments["classical"]["tk_reg"] = 1
    experiments["classical"]["pr_reg"] = 0
    experiments["classical"]["data_check"] = True
    experiments["classical"]["full_check"] = True

    # MUD point
    experiments["consistent"] = {}
    experiments["consistent"]["out_file"] = f"{fdir}/consistent_solution.png"
    experiments["consistent"]["tk_reg"] = 1
    experiments["consistent"]["pr_reg"] = 1
    experiments["consistent"]["data_check"] = True
    experiments["consistent"]["full_check"] = True

    # comparison
    experiments["compare"] = {}
    experiments["compare"]["out_file"] = f"{fdir}/map_compare_contour.png"
    experiments["compare"]["data_check"] = True
    experiments["compare"]["full_check"] = True
    experiments["compare"]["tk_reg"] = 1
    experiments["compare"]["pr_reg"] = 0
    experiments["compare"]["comparison"] = True
    experiments["compare"]["cov_01"] = -0.5

    for ex in experiments:
        _logger.info(f"Running {ex}")
        config = experiments[ex]
        out_file = config.get("out_file", "latest_figure.png")
        tk_reg = config.get("tk_reg", 1)
        pr_reg = config.get("pr_reg", 1)
        cov_01 = config.get("cov_01", -0.25)
        cov_11 = config.get("cov_11", 0.5)
        obs_std = config.get("obs_std", 0.5)
        full_check = config.get("full_check", True)
        data_check = config.get("data_check", True)
        numr_check = config.get("numr_check", False)
        comparison = config.get("comparison", False)

        contour_example(
            A=A,
            b=b,
            save=save,
            param_ref=lam_true,
            compare=comparison,
            cov_01=cov_01,
            cov_11=cov_11,
            initial_mean=initial_mean,
            alpha=tk_reg,
            omega=pr_reg,
            show_full=full_check,
            show_data=data_check,
            show_est=numr_check,
            obs_std=obs_std,
            figname=out_file,
        )
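
# Sketch of why this setup is rank-deficient (illustration, not library code):
# A maps R^2 to R^1, so the data only constrains lam_1 + lam_2; the direction
# (1, -1) lies in the null space and must be resolved by the initial density
# and/or the regularization terms toggled in the experiments above.
import numpy as np

_A = np.array([[1, 1]])
_lam_true = np.array([0.7, 0.3])
print(_A @ _lam_true)               # [1.0]: the only observable combination
print(_A @ np.array([1.0, -1.0]))   # [0.0]: unobservable (null-space) direction
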
Example #6
def main(args):
    """
    Main entrypoint for example-generation
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    np.random.seed(args.seed)
    #     example       = args.example
    #     num_trials   = args.num_trials
    #     fsize        = args.fsize
    #     linewidth    = args.linewidth
    #     seed         = args.seed
    #     inputdim     = args.input_dim
    #     save         = args.save
    #     alt          = args.alt
    #     bayes        = args.bayes
    #     prefix       = args.prefix
    #     dist         = args.dist
    fdir = "figures/comparison"
    check_dir(fdir)
    presentation = False
    save = True

    if not presentation:
        plt.rcParams["mathtext.fontset"] = "stix"
        plt.rcParams["font.family"] = "STIXGeneral"

    # number of initial samples (N) and the mean (mu) / standard deviation (sigma)
    # of the observed distribution
    N, mu, sigma = int(1e3), 0.25, 0.1
    lam = np.random.uniform(low=-1, high=1, size=N)

    # Evaluate the QoI map q(lam) = lam^5 on the initial sample set to form the predicted data
    qvals_predict = QoI(lam, 5)

    # Estimate the push-forward density for the QoI
    pi_predict = kde(qvals_predict)

    # Compute more observations for use in BIP
    tick_fsize = 28
    legend_fsize = 24
    for num_data in [1, 5, 10, 20]:
        np.random.seed(123456)  # fixed seed for reproducibility; comment out if desired
        # scipy's `scale` parameter is the standard deviation, so N(mu, sigma^2) needs scale=sigma
        data = norm.rvs(loc=mu, scale=sigma, size=num_data)

        # We estimate the observed distribution with a parametric estimate to keep
        # the assumptions as similar as possible between the BIP and the SIP:
        # sigma is assumed known, while the mean mu is unknown and estimated
        # from the data to fit a Gaussian distribution.
        mu_est = np.mean(data)

        r_approx = np.divide(norm.pdf(qvals_predict, loc=mu_est, scale=sigma),
                             pi_predict(qvals_predict))

        # Use r to compute weighted KDE approximating the updated density
        update_kde = kde(lam, weights=r_approx)

        # Construct estimated push-forward of this updated density
        pf_update_kde = kde(qvals_predict, weights=r_approx)

        likelihood_vals = np.zeros(N)
        for i in range(N):
            likelihood_vals[i] = data_likelihood(qvals_predict[i], data,
                                                 num_data, sigma)

        # compute normalizing constants
        C_nonlinear = np.mean(likelihood_vals)
        data_like_normalized = likelihood_vals / C_nonlinear

        posterior_kde = kde(lam, weights=data_like_normalized)

        # Construct push-forward of statistical Bayesian posterior
        pf_posterior_kde = kde(qvals_predict, weights=data_like_normalized)

        # Plot the initial, updated, and posterior densities
        fig, ax = plt.subplots(figsize=(10, 10))
        lam_plot = np.linspace(-1, 1, num=1000)
        ax.plot(
            lam_plot,
            uniform.pdf(lam_plot, loc=-1, scale=2),
            "b--",
            linewidth=4,
            label="Initial/Prior",
        )
        ax.plot(lam_plot,
                update_kde(lam_plot),
                "k-.",
                linewidth=4,
                label="Update")
        ax.plot(lam_plot,
                posterior_kde(lam_plot),
                "g:",
                linewidth=4,
                label="Posterior")
        ax.set_xlim([-1, 1])
        if num_data > 1:
            plt.annotate(f"$N={num_data}$", (-0.75, 5),
                         fontsize=legend_fsize * 1.5)
            ax.set_ylim([0, 28])  # fix axis height for comparisons
        #         else:
        #             ax.set_ylim([0, 5])
        ax.tick_params(axis="x", labelsize=tick_fsize)
        ax.tick_params(axis="y", labelsize=tick_fsize)
        ax.set_xlabel("$\\Lambda$", fontsize=1.25 * tick_fsize)
        ax.legend(fontsize=legend_fsize, loc="upper left")
        if save:
            fig.savefig(f"{fdir}/bip-vs-sip-{num_data}.png",
                        bbox_inches="tight")
            plt.close(fig)
        # plt.show()

        # Plot the push-forward of the initial, observed density,
        # and push-forward of pullback and stats posterior
        fig, ax = plt.subplots(figsize=(10, 10))
        qplot = np.linspace(-1, 1, num=1000)
        ax.plot(
            qplot,
            norm.pdf(qplot, loc=mu, scale=sigma),
            "r-",
            linewidth=6,
            label="$N(0.25, 0.1^2)$",
        )
        ax.plot(qplot,
                pi_predict(qplot),
                "b-.",
                linewidth=4,
                label="PF of Initial")
        ax.plot(qplot,
                pf_update_kde(qplot),
                "k--",
                linewidth=4,
                label="PF of Update")
        ax.plot(qplot,
                pf_posterior_kde(qplot),
                "g:",
                linewidth=4,
                label="PF of Posterior")

        ax.set_xlim([-1, 1])
        if num_data > 1:
            plt.annotate(f"$N={num_data}$", (-0.75, 5),
                         fontsize=legend_fsize * 1.5)
            ax.set_ylim([0, 28])  # fix axis height for comparisons
        #         else:
        #             ax.set_ylim([0, 5])
        ax.tick_params(axis="x", labelsize=tick_fsize)
        ax.tick_params(axis="y", labelsize=tick_fsize)
        ax.set_xlabel("$\\mathcal{D}$", fontsize=1.25 * tick_fsize)
        ax.legend(fontsize=legend_fsize, loc="upper left")
        if save:
            fig.savefig(f"{fdir}/bip-vs-sip-pf-{num_data}.png",
                        bbox_inches="tight")
            plt.close(fig)
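
# Hypothetical sketch of the data_likelihood helper referenced above (it is
# defined elsewhere in the module; this assumes an i.i.d. Gaussian form and is
# only an illustration of the call signature used in the loop).
import numpy as np
from scipy.stats import norm


def data_likelihood_sketch(predicted, data, num_data, sigma):
    # product of N(d_i; predicted, sigma^2) over the first num_data observations
    return np.prod(norm.pdf(np.asarray(data)[:num_data], loc=predicted, scale=sigma))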