# Example 1
def plot_epistemic_var_vs_time(gp, solver, traj_init, traj_opt=None):
    """Plot the sparse-GP epistemic variance along a trajectory over time.

    Evaluates the predictive variance at the (x, y) positions (first two
    columns) of the initial trajectory — and optionally an optimised one —
    and plots it against the solver's time grid. Also prints the summed
    variance of each trajectory for quick comparison.

    :param gp: sparse GP with attributes Z, kernel, mean_func, q_mu, q_sqrt
    :param solver: provides the time axis via ``solver.times``
    :param traj_init: initial trajectory; columns 0:2 are positions
    :param traj_opt: optional optimised trajectory with the same layout
    :returns: (fig, ax) of the created figure
    """
    params = {
        "text.usetex": True,
        # NOTE(review): newer matplotlib expects a single preamble string,
        # not a list — confirm against the pinned matplotlib version.
        "text.latex.preamble": [
            "\\usepackage{amssymb}",
            "\\usepackage{amsmath}",
        ],
    }
    plt.rcParams.update(params)

    _, var_init = gp_predict(
        traj_init[:, 0:2],
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )
    # 0.0 keeps the final np.sum well-defined when no optimised traj is given.
    var_opt = 0.0
    if traj_opt is not None:
        _, var_opt = gp_predict(
            traj_opt[:, 0:2],
            gp.Z,
            kernels=gp.kernel,
            mean_funcs=gp.mean_func,
            f=gp.q_mu,
            q_sqrt=gp.q_sqrt,
            full_cov=False,
        )
    fig, ax = plt.subplots(1, 1, figsize=(6.4, 2.8))
    ax.set_xlabel("Time $t$")
    # Raw string: "\m" is an invalid escape sequence in a plain literal and
    # will become a SyntaxError in future Python; rendered text is unchanged.
    ax.set_ylabel(r"$\mathbb{V}[h^{(1)}]$")

    ax.plot(
        solver.times,
        var_init[0, :],
        color=color_init,
        label="Initial trajectory",
    )
    if traj_opt is not None:
        ax.plot(
            solver.times,
            var_opt[0, :],
            color=color_opt,
            label="Optimised trajectory",
        )
    ax.legend()
    sum_var_init = np.sum(var_init)
    sum_var_opt = np.sum(var_opt)
    print("Sum epistemic var init = ", sum_var_init)
    print("Sum epistemic var opt = ", sum_var_opt)
    return fig, ax
# Example 2
def mogpe_mixing_probability(
    Xnew: InputData,
    X: InputData,
    kernel: Kernel,
    mean_func: MeanFunc,
    f: OutputData,
    full_cov: bool = False,
    q_sqrt=None,
    jitter=1e-6,
    white: bool = True,
):
    """Bernoulli mixing probability of the first expert at the test inputs.

    Predicts the GP posterior at ``Xnew`` and maps the mean/variance through
    the Bernoulli likelihood, returning the probability that the indicator
    equals 0 (scaled — see note below).

    TODO are these dimensions the right way around?
    returns: [num_test, output_dim, input_dim]
    """
    # Promote a single test point to a (1, input_dim) batch.
    if len(Xnew.shape) == 1:
        Xnew = Xnew.reshape(1, -1)

    posterior_mean, posterior_var = gp_predict(
        Xnew,
        X,
        kernel,
        mean_func,
        f,
        full_cov=full_cov,
        q_sqrt=q_sqrt,
        jitter=jitter,
        white=white,
    )

    num_outputs = f.shape[-1]
    if full_cov is False:
        # Flatten to [num_test, output_dim] when only marginals are computed.
        posterior_var = posterior_var.reshape(-1, num_outputs)

    prob_a_0, _ = bernoulli_predict_mean_and_var(posterior_mean, posterior_var)
    print("prob_a_0")
    print(prob_a_0.shape)
    # NOTE(review): the factor of 10 looks like leftover debugging scale —
    # confirm before relying on this value as a probability.
    return 10 * prob_a_0
# Example 3
def plot_svgp_and_start_end(gp, solver, traj_opt=None):
    """Plot SVGP posterior mean/variance with start/end markers and trajectories.

    Builds a grid over the training inputs, predicts the sparse-GP mean and
    variance on it, and overlays the solver's start/end positions, the
    initial trajectory, and (optionally) an optimised trajectory.

    :param gp: sparse GP with attributes X, Z, kernel, mean_func, q_mu, q_sqrt
    :param solver: provides start/end positions and ``state_guesses``
    :param traj_opt: optional optimised trajectory to overlay
    :returns: (fig, axs) from ``plot_mean_and_var``
    """
    params = {
        "text.usetex": True,
        # NOTE(review): newer matplotlib expects a single preamble string,
        # not a list — confirm against the pinned matplotlib version.
        "text.latex.preamble": [
            "\\usepackage{amssymb}",
            "\\usepackage{amsmath}",
        ],
    }
    plt.rcParams.update(params)

    Xnew, xx, yy = create_grid(gp.X, N=961)
    mu, var = gp_predict(
        Xnew,
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )
    print("mu var")
    print(mu.shape)
    print(var.shape)
    # Raw strings: "\m" is an invalid escape sequence in plain literals and
    # will become a SyntaxError in future Python; rendered labels unchanged.
    fig, axs = plot_mean_and_var(
        xx,
        yy,
        mu,
        var,
        llabel=r"$\mathbb{E}[h^{(1)}(\mathbf{x})]$",
        rlabel=r"$\mathbb{V}[h^{(1)}(\mathbf{x})]$",
    )

    for ax in axs:
        fig, ax = plot_start_and_end_pos(fig, ax, solver)
        plot_traj(
            fig,
            ax,
            solver.state_guesses,
            color=color_init,
            label="Initial trajectory",
        )
        if traj_opt is not None:
            plot_traj(
                fig,
                ax,
                traj_opt,
                color=color_opt,
                label="Optimised trajectory",
            )
    axs[0].legend()
    return fig, axs
# Example 4
def plot_svgp_and_start_end(gp, solver, traj_opts=None, labels=None):
    """Plot SVGP mean/variance over the inducing-point grid with trajectories.

    Like the training-input variant, but grids over the inducing inputs
    ``gp.Z``. Overlays the initial trajectory and any optimised trajectories.

    :param gp: sparse GP with attributes Z, kernel, mean_func, q_mu, q_sqrt
    :param solver: provides start/end positions and ``state_guesses``
    :param traj_opts: a single trajectory or a list of trajectories
    :param labels: legend labels for ``traj_opts`` (defaults to two empties)
    :returns: (fig, axs) from ``plot_mean_and_var``
    """
    # Avoid a mutable default argument; preserve the original default value.
    if labels is None:
        labels = ["", ""]
    params = {
        "text.usetex": True,
        # NOTE(review): newer matplotlib expects a single preamble string,
        # not a list — confirm against the pinned matplotlib version.
        "text.latex.preamble": [
            "\\usepackage{amssymb}",
            "\\usepackage{amsmath}",
        ],
    }
    plt.rcParams.update(params)

    Xnew, xx, yy = create_grid(gp.Z, N=961)
    mu, var = gp_predict(
        Xnew,
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )
    print("mu var")
    print(mu.shape)
    print(var.shape)
    # Raw strings: "\m" is an invalid escape sequence in plain literals and
    # will become a SyntaxError in future Python; rendered labels unchanged.
    fig, axs = plot_mean_and_var(
        xx,
        yy,
        mu,
        var,
        llabel=r"$\mathbb{E}[h^{(1)}(\mathbf{x})]$",
        rlabel=r"$\mathbb{V}[h^{(1)}(\mathbf{x})]$",
    )

    for ax in axs:
        fig, ax = plot_start_and_end_pos(fig, ax, solver)
        ax.set_xlabel("$x$")
        ax.set_ylabel("$y$")
        plot_traj(fig,
                  ax,
                  solver.state_guesses,
                  color=color_init,
                  label="Init traj")
        if traj_opts is not None:
            if isinstance(traj_opts, list):
                for traj, label, color_opt in zip(traj_opts, labels,
                                                  color_opts):
                    plot_traj(fig, ax, traj, color=color_opt, label=label)
            else:
                plot_traj(fig, ax, traj_opts)
    axs[0].legend(loc="lower left")
    return fig, axs
# Example 5
def plot_3d_traj_mean_and_var(fig, axs, gp, traj):
    """Overlay a trajectory on 3D mean and variance surface axes.

    Predicts the sparse-GP mean and variance at the trajectory's (x, y)
    positions and draws the trajectory at those heights on ``axs[0]`` (mean)
    and ``axs[1]`` (variance).

    :param fig: existing matplotlib figure
    :param axs: pair of 3D axes [mean_ax, var_ax]
    :param gp: sparse GP with attributes Z, kernel, mean_func, q_mu, q_sqrt
    :param traj: trajectory; columns 0:2 are positions
    :returns: (fig, axs) unchanged objects for chaining
    """
    # NOTE(review): the original also built an unused grid via
    # create_grid(gp.X, N=961); dropped here since its result was never read.
    traj_mu, traj_var = gp_predict(
        traj[:, 0:2],
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )
    plot_3d_traj(fig, axs[0], traj, zs=traj_mu)
    plot_3d_traj(fig, axs[1], traj, zs=traj_var)
    return fig, axs
# Example 6
def single_mogpe_mixing_probability(
    Xnew: InputData,
    X: InputData,
    kernel: Kernel,
    mean_func: MeanFunc,
    f: OutputData,
    full_cov: bool = False,
    q_sqrt=None,
    jitter=1e-6,
    white: bool = True,
):
    """Mixing probability of the first expert for a single test input.

    Predicts the GP posterior at ``Xnew``, maps each output dimension through
    the Bernoulli likelihood, and returns the product over output dimensions
    of the per-dimension probabilities that the indicator equals 0.

    TODO are these dimensions the right way around?
    returns: [num_test, output_dim, input_dim]
    """
    # Promote a single test point to a (1, input_dim) batch.
    if len(Xnew.shape) == 1:
        Xnew = Xnew.reshape(1, -1)

    mu, var = gp_predict(
        Xnew,
        X,
        kernel,
        mean_func,
        f,
        full_cov=full_cov,
        q_sqrt=q_sqrt,
        jitter=jitter,
        white=white,
    )
    print("inside single mixing prob")
    print(mu.shape)
    print(var.shape)

    output_dim = f.shape[-1]
    if full_cov is False:
        var = var.reshape(-1, output_dim)

    # Per-dimension P(a=0); note mu is sliced along axis 0 while var is
    # sliced along axis 1 — presumably their layouts differ, TODO confirm.
    per_dim_probs = [
        bernoulli_predict_mean_and_var(
            mu[d : d + 1, :], var[:, d : d + 1]
        )[0].reshape(1)
        for d in range(output_dim)
    ]
    prob_a_0 = np.stack(per_dim_probs)  # [output_dim, 1]
    print("prob a 0")
    print(prob_a_0.shape)

    # Product over output dimensions collapses to a scalar.
    prob_a_0 = np.prod(prob_a_0)
    print(prob_a_0.shape)
    return prob_a_0.reshape(-1)
# Example 7
def plot_gp_and_start_end(gp, solver):
    """Plot the GP posterior mean/variance with the solver's start/end marks.

    Uses the training inputs ``gp.X`` both as the conditioning set and to
    define the evaluation grid, predicting with the observed outputs ``gp.Y``.

    :returns: (fig, axs) from ``plot_mean_and_var``
    """
    grid, xx, yy = create_grid(gp.X, N=961)
    mean, variance = gp_predict(
        grid,
        gp.X,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.Y,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )
    fig, axs = plot_mean_and_var(xx, yy, mean, variance)
    for ax in axs:
        fig, ax = plot_start_and_end_pos(fig, ax, solver)
    return fig, axs
# Example 8
def plot_3d_mean_and_var(gp, solver):
    """Plot the sparse-GP posterior mean and variance as side-by-side 3D surfaces.

    :param gp: sparse GP with attributes X, Z, kernel, mean_func, q_mu, q_sqrt
    :param solver: unused here; kept for signature consistency with the other
        plotting helpers
    :returns: (fig, [ax_mu, ax_var])
    """
    Xnew, xx, yy = create_grid(gp.X, N=961)
    mu, var = gp_predict(
        Xnew,
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )

    fig = plt.figure(figsize=plt.figaspect(0.5))
    ax_mu = fig.add_subplot(1, 2, 1, projection="3d")
    # Return values of plot_3d_surf were bound but never used; discard them.
    plot_3d_surf(fig, ax_mu, xx, yy, mu)
    ax_var = fig.add_subplot(1, 2, 2, projection="3d")
    plot_3d_surf(fig, ax_var, xx, yy, var)

    ax_mu.set_zlabel("Mean")
    ax_var.set_zlabel("Variance")
    return fig, [ax_mu, ax_var]
# Example 9
def plot_svgp_jacobian_mean(gp, solver, traj_opt=None):
    """Plot SVGP mean/variance with the Jacobian mean overlaid as a quiver field.

    Predicts the sparse-GP posterior on a grid, computes the GP Jacobian at
    every grid point via ``jax.vmap``, and draws the Jacobian means as arrows
    on both the mean and variance panels, together with start/end positions
    and trajectories.

    :param gp: sparse GP with attributes X, Z, kernel, mean_func, q_mu, q_sqrt
    :param solver: provides start/end positions and ``state_guesses``
    :param traj_opt: optional optimised trajectory to overlay
    :returns: (fig, axs) from ``plot_mean_and_var``
    """
    params = {
        "text.usetex": True,
        # NOTE(review): newer matplotlib expects a single preamble string,
        # not a list — confirm against the pinned matplotlib version.
        "text.latex.preamble": [
            "\\usepackage{amssymb}",
            "\\usepackage{amsmath}",
        ],
    }
    plt.rcParams.update(params)

    Xnew, xx, yy = create_grid(gp.X, N=961)
    mu, var = gp_predict(
        Xnew,
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )

    def gp_jacobian_all(x):
        """Jacobian of the GP at a single grid point (promoted to a batch of 1)."""
        if len(x.shape) == 1:
            x = x.reshape(1, -1)
        return gp_jacobian(
            x,
            gp.Z,
            gp.kernel,
            gp.mean_func,
            f=gp.q_mu,
            q_sqrt=gp.q_sqrt,
            full_cov=False,
        )

    # in_axes=0: the original "(0)" was a parenthesised int, not a tuple.
    mu_j, var_j = jax.vmap(gp_jacobian_all, in_axes=0)(Xnew)
    print("gp jacobain mu var")
    print(mu_j.shape)
    print(var_j.shape)
    # Raw strings: "\m" is an invalid escape sequence in plain literals and
    # will become a SyntaxError in future Python; rendered labels unchanged.
    fig, axs = plot_mean_and_var(
        xx,
        yy,
        mu,
        var,
        llabel=r"$\mathbb{E}[h^{(1)}]$",
        rlabel=r"$\mathbb{V}[h^{(1)}]$",
    )

    for ax in axs:
        # Jacobian mean drawn as arrows at every grid point.
        ax.quiver(Xnew[:, 0], Xnew[:, 1], mu_j[:, 0], mu_j[:, 1], color="k")

        fig, ax = plot_start_and_end_pos(fig, ax, solver)
        plot_omitted_data(fig, ax, color="k")
        plot_traj(
            fig,
            ax,
            solver.state_guesses,
            color=color_init,
            label="Initial trajectory",
        )
        if traj_opt is not None:
            plot_traj(
                fig,
                ax,
                traj_opt,
                color=color_opt,
                label="Optimised trajectory",
            )
    axs[0].legend()
    return fig, axs
# Example 10
def plot_svgp_and_all_trajs(gp, solver, traj_opts=None, labels=None):
    """Plot SVGP mean/variance and save a PDF after each trajectory overlay.

    Saves "svgp_2d_traj_init.pdf" after drawing the initial trajectory and
    "svgp_2d_traj_<i>.pdf" after each optimised trajectory, all under the
    module-level ``dir_name``.

    :param gp: sparse GP with attributes X, Z, kernel, mean_func, q_mu, q_sqrt
    :param solver: provides start/end positions and ``state_guesses``
    :param traj_opts: optional list of optimised trajectories
    :param labels: legend labels, one per trajectory in ``traj_opts``.
        NOTE(review): if ``traj_opts`` is given while ``labels`` is None the
        zip below raises TypeError — same as the original; confirm callers
        always pass both.
    :returns: (fig, axs)
    """
    params = {
        "text.usetex": True,
        # NOTE(review): newer matplotlib expects a single preamble string,
        # not a list — confirm against the pinned matplotlib version.
        "text.latex.preamble": [
            "\\usepackage{amssymb}",
            "\\usepackage{amsmath}",
        ],
    }
    plt.rcParams.update(params)

    Xnew, xx, yy = create_grid(gp.X, N=961)
    mu, var = gp_predict(
        Xnew,
        gp.Z,
        kernels=gp.kernel,
        mean_funcs=gp.mean_func,
        f=gp.q_mu,
        q_sqrt=gp.q_sqrt,
        full_cov=False,
    )
    print("mu var")
    print(mu.shape)
    print(var.shape)
    # Raw strings: "\m" is an invalid escape sequence in plain literals and
    # will become a SyntaxError in future Python; rendered labels unchanged.
    fig, axs = plot_mean_and_var(
        xx,
        yy,
        mu,
        var,
        llabel=r"$\mathbb{E}[h^{(1)}]$",
        rlabel=r"$\mathbb{V}[h^{(1)}]$",
    )

    for ax in axs:
        fig, ax = plot_start_and_end_pos(fig, ax, solver)
        plot_omitted_data(fig, ax, color="k")
        ax.set_xlabel("$x$")
        ax.set_ylabel("$y$")
        plot_traj(fig,
                  ax,
                  solver.state_guesses,
                  color=color_init,
                  label="Init traj")
        # Saved once per axis, matching the original behaviour (the file is
        # simply overwritten on the second pass).
        save_name = dir_name + "svgp_2d_traj_init.pdf"
        plt.savefig(save_name,
                    transparent=True,
                    bbox_inches="tight",
                    pad_inches=0)

    if traj_opts is not None:
        # enumerate replaces the original manual "i = 0 ... i += 1" counter.
        for i, (traj, label, color_opt) in enumerate(
                zip(traj_opts, labels, color_opts)):
            for ax in axs:
                plot_traj(fig, ax, traj, color=color_opt, label=label)
            save_name = dir_name + "svgp_2d_traj_" + str(i) + ".pdf"
            plt.savefig(save_name,
                        transparent=True,
                        bbox_inches="tight",
                        pad_inches=0)

    return fig, axs