예제 #1
0
    def callback(bnn_params, dsc_params, iter, gen_gradient, dsc_gradient):
        """Plot prior function draws: GP prior, trained BNN prior, std-normal BNN."""
        num_fns = 3
        xs = np.linspace(-8, 8, num=100).reshape(100, 1)
        std_params = init_var_params(bnn_layer_sizes,
                                     scale_mean=0,
                                     scale=1)

        # f ~ p(f) under each of the three priors.
        fns_bnn_gpp = sample_bnn(bnn_params, xs, num_fns, bnn_layer_sizes)
        fns_gp = sample_gpp(xs, num_fns)
        fns_bnn = sample_bnn(std_params, xs, num_fns, bnn_layer_sizes)

        if plot_during:
            for panel in ax:
                panel.cla()

            ax[0].plot(xs, fns_gp, color='green')
            ax[1].plot(xs, fns_bnn_gpp, color='red')
            ax[2].plot(xs, fns_bnn, color='blue')

            plt.draw()
            plt.pause(1.0 / 40.0)

        print("Iteration {} ".format(iter))
예제 #2
0
    def callback_kl(prior_params, iter, g):
        """Visualize prior samples and report the current KL estimate."""
        num_fns, num_pts = 3, 500
        xs = np.linspace(-8, 8, num=num_pts).reshape(1, num_pts)

        # Function draws from the learned BNN prior, a fresh BNN, and the GP.
        fns_bnn_gpp = sample_bnn(xs, num_fns, arch, act, prior_params)
        fns_bnn = sample_bnn(xs, arch, act, num_fns)
        fns_gp = sample_gpp(xs, num_fns)

        if plot_during:
            for panel in ax:
                panel.cla()  # clear plots
            ax[0].plot(xs, fns_gp, color='green')
            ax[1].plot(xs, fns_bnn_gpp, color='red')
            ax[2].plot(xs, fns_bnn, color='blue')

            plt.draw()
            plt.pause(1.0 / 40.0)

        fns = (fns_gp, fns_bnn, fns_bnn_gpp)
        kl_val = kl(prior_params, iter)

        if save_during:
            title = " iter {} kl {:5}".format(iter, kl_val)
            plotting.plot_priors(xs, fns,
                                 os.path.join(save_dir, title))

        print("Iteration {} KL {} ".format(iter, kl_val))
예제 #3
0
    def callback(bnn_params, dsc_params, iter, gen_gradient, dsc_gradient):
        """Plot prior draws from the GP, the trained BNN, and a std-normal BNN."""
        num_fns, num_pts = 3, 500
        xs = np.linspace(-8, 8, num=num_pts).reshape(num_pts, 1)
        std_params = init_var_params(bnn_arch, scale_mean=0, scale=1)

        # f ~ p(f) under each prior.
        fns_bnn_gpp = sample_bnn(bnn_params, xs, num_fns, bnn_arch, act)
        fns_gp = sample_gpp(xs, num_fns, ker)
        fns_bnn = sample_bnn(std_params, xs, num_fns, bnn_arch, act)

        if plot_during:
            for panel in ax:
                panel.cla()

            ax[0].plot(xs, fns_gp.T, color='green')
            ax[1].plot(xs, fns_bnn_gpp.T, color='red')
            ax[2].plot(xs, fns_bnn.T, color='blue')

            plt.draw()
            plt.pause(1.0 / 40.0)

        print("Iteration {} ".format(iter))
예제 #4
0
파일: vip.py 프로젝트: danielflamshep/gpbnn
def predictions(prior_params, X, y, Xstar, noise, f_samples, arch, act):
    """Predictive mean and variance at test inputs Xstar.

    Builds basis functions from centered, normalized BNN prior samples at
    Xstar and combines them with the posterior moments of the coefficients
    inferred from (X, y).
    """
    # Posterior over coefficients a from prior moments at the training inputs.
    m, K_ff = prior_mean_cov(prior_params, X, f_samples, arch, act)
    qa_mean, qa_Sigma = qa_posterior_moments(m, K_ff, y, noise)

    # Prior function samples at the test inputs, shape [f_samples, n_star].
    fss = sample_bnn(prior_params, Xstar, f_samples, arch, act)
    # BUG FIX: center with the mean of the *test-input* samples (fss). The
    # original averaged samples drawn at X, which broadcasts incorrectly (or
    # crashes) whenever X and Xstar have different numbers of points.
    mstar = np.mean(fss, axis=0, keepdims=True)
    phi = (fss - mstar) / unbiased(fss)

    pred_mean = np.dot(phi.T, qa_mean)
    pred_var = np.dot(phi.T, np.dot(qa_Sigma, phi))

    return pred_mean, pred_var
예제 #5
0
def gan_objective(prior_params,
                  d_params,
                  n_data,
                  n_samples,
                  bnn_layer_sizes,
                  act,
                  d_act='tanh'):
    '''estimates V(G, D) = E_p_gp[D(f)] - E_pbnn[D(f)]]'''

    x = sample_inputs('uniform', n_data, (-10, 10))
    fbnns = sample_bnn(prior_params, x, n_samples, bnn_layer_sizes,
                       act)  # [nf, nd]
    fgps = sample_gpp(x, n_samples, 'rbf')  # sample f ~ P_gp(f)

    D_fbnns = nn_predict(d_params, fbnns, d_act)
    D_fgps = nn_predict(d_params, fgps, d_act)

    # Gradient penalty evaluated at a random interpolate of the two sample sets.
    eps = np.random.uniform()
    f = eps * fgps + (1 - eps) * fbnns

    def D(function):
        # FIX: use the configured d_act, consistent with D_fbnns/D_fgps above
        # (the original hard-coded 'tanh' and ignored the parameter).
        return nn_predict(d_params, function, d_act)

    # Penalize deviation of the discriminator's gradient norm from 1.
    g = elementwise_grad(D)(f)
    pen = 10 * (norm(g, ord=2, axis=1) - 1)**2

    return np.mean(D_fgps - D_fbnns + pen)
예제 #6
0
def plot_save_priors_fdensity(bnn_arch=[1, 20, 1], bnn_act='rbf', n_samples=25):
    """Plot function-space densities of BNN-prior and GP-prior samples.

    n_samples generalizes the previously hard-coded sample count; the
    default (25) preserves the original behavior.
    """
    plot_inputs = np.linspace(-10, 10, num=500)[:, None]
    std_norm_param = init_var_params(bnn_arch, scale_mean=0, scale=1)
    f_bnn = sample_bnn(std_norm_param, plot_inputs, n_samples, bnn_arch,
                       bnn_act)
    f_gps = sample_gpp(plot_inputs, n_samples)
    plot_density(f_bnn)
    plot_density(f_gps, plot="gpp")
예제 #7
0
def plot_save_priors_functions(bnn_arch=[1, 20, 1], bnn_act='rbf', n_samples=3):
    """Plot sampled functions from the BNN prior and the GP prior.

    n_samples generalizes the previously hard-coded sample count; the
    default (3) preserves the original behavior.
    """
    plot_inputs = np.linspace(-10, 10, num=500)[:, None]
    std_norm_param = init_var_params(bnn_arch, scale_mean=0, scale=1)
    f_bnn = sample_bnn(std_norm_param, plot_inputs, n_samples, bnn_arch,
                       bnn_act)
    f_gps = sample_gpp(plot_inputs, n_samples)
    plot_samples(plot_inputs, f_bnn.T)
    plot_samples(plot_inputs, f_gps.T, plot="gpp")
예제 #8
0
def kl_estimate(params, n_samples, arch, act):
    """Monte-Carlo estimate of E[log q(y) + log p(w) - log p_gp(y)].

    Removes a leftover debug print of intermediate shapes.
    """
    prior_params, noise, kernel_params, x = params
    # NOTE(review): the x unpacked from `params` is immediately overwritten by
    # a fresh grid, so the packed x is unused here — confirm this is intended.
    x = sample_inputs('gridbox', 100, (0, 10))
    y = sample_bnn(prior_params, x, n_samples, arch, act, noise)  # [nf, nd]
    f = sample_bnn(prior_params, x, n_samples, arch, act)

    w = sample_weights(prior_params, 1)
    mu, log_std = prior_params
    log_prior = diag_gaussian_log_density(w, mu, log_std)
    log_likelihood = diag_gaussian_log_density(y, f, noise)

    # Diagonal jitter keeps the GP covariance positive definite for logpdf.
    jitter = 1e-7 * np.eye(y.shape[0])
    cov = covariance(kernel_params, x, x) + jitter
    log_pgp = mvn.logpdf(y, np.zeros(y.shape[1]), cov)

    return np.mean(log_likelihood + log_prior - log_pgp)
예제 #9
0
def kl_estimate(params,
                layer_sizes,
                n_data,
                N_samples,
                act='rbf',
                kernel='rbf',
                noise=1e-7):
    """Estimate KL(p_bnn || p_gp) as -H[p_bnn] - E_pbnn[log p_gp(f)].

    Removes a leftover debug print of the covariance matrix / determinant
    and the commented-out alternative log-density computation.
    """
    x = np.random.uniform(-10, 10, size=(n_data, 1))
    y = sample_bnn(params, x, N_samples, layer_sizes, act)  # [nf, nd]
    covariance = kernel_dict[kernel]
    # Diagonal noise keeps the kernel matrix positive definite.
    cov = covariance(x, x) + noise * np.eye(x.shape[0])
    log_gp = log_gp_prior(y, cov)
    return -entropy_estimate(y) - np.mean(log_gp)
예제 #10
0
파일: vip.py 프로젝트: danielflamshep/gpbnn
def elbo(prior_params, qa_params, X, y, f_samples, arch, act, noise):
    """Single-sample ELBO estimate: E[log q(y) - log q(a) + log p(a)].

    Removes a leftover debug print of intermediate shapes and the
    commented-out alternative coefficient-posterior computation.
    """
    fs = sample_bnn(prior_params, X, f_samples, arch, act)
    m = np.mean(fs, axis=0, keepdims=True)
    qa_mean, qa_Sigma = qa_params

    a_samples = sample_normal(qa_params, 1)
    # Predictive mean: coefficients applied to centered, normalized samples.
    mean = a_samples * (fs - m) / unbiased(fs)

    log_qy = diag_gaussian_log_density(y, mean, noise)
    log_qa = mvn.logpdf(a_samples, qa_mean, qa_Sigma)
    log_pa = diag_gaussian_log_density(a_samples, 0, 1)

    return np.mean(log_qy - log_qa + log_pa)
예제 #11
0
    def callback(params, iter, g):
        """Plot current BNN prior samples against GP prior samples, report KL."""
        num_fns = 3
        xs = np.linspace(-5, 5, num=100)

        fns_bnn = sample_bnn(params, xs[:, None], num_fns, arch, act)
        fns_gp = sample_gpp(xs[:, None], num_fns, kernel=ker)

        for panel in ax:
            panel.cla()
        ax[0].plot(xs, fns_gp.T, color='green')
        ax[1].plot(xs, fns_bnn.T, color='red')

        plt.draw()
        plt.pause(1.0 / 40.0)

        print("Iteration {} KL {} ".format(iter, kl(params, iter)))
예제 #12
0
파일: vip.py 프로젝트: danielflamshep/gpbnn
def predictions_qa(prior_params, X, y, Xstar, noise, f_samples, arch, act):
    """Predictive function values at Xstar as a coefficient-weighted sum of
    BNN prior samples, with coefficients drawn from the qa posterior.

    Removes a leftover debug print of the coefficient-sample shape.
    """
    fs = sample_bnn(prior_params, Xstar, f_samples, arch, act)
    m, K_ff = prior_mean_cov(prior_params, X, f_samples, arch, act)
    a_samples = sample_full_normal(qa_posterior_moments(m, K_ff, y, noise), 1)
    # Sum over the sample axis: one weighted combination of basis functions.
    return np.sum(fs * a_samples, 0)
예제 #13
0
파일: vip.py 프로젝트: danielflamshep/gpbnn
def prior_mean_cov(prior_params, X, n_samples, arch, act):
    """Empirical mean and covariance of BNN prior function values at X."""
    draws = sample_bnn(prior_params, X, n_samples, arch, act)
    return empirical_moments(draws.T)
예제 #14
0
def NN_likelihood(x, y, params, arch, act, n_samples=10, noise=1e-1):
    """Diagonal-Gaussian log-density of targets y under BNN function samples."""
    sampled_fns = sample_bnn(params, x, n_samples, arch, act)
    return diag_gaussian_log_density(sampled_fns, y, noise)
예제 #15
0
if __name__ == '__main__':

    n_data, n_samples, arch = 10, 1, [1, 20, 20, 1]

    f, ax = plt.subplots(2, sharex=True)
    plt.ion()
    plt.show(block=False)

    def kl(prior_params,t):
        return outer_objective(prior_params, n_data, n_samples, arch)

    def callback(params, iter, g):

        n_samples = 3
 f_bnn_gpp = sample_bnn(params, plot_inputs[:, None], n_samples, arch, rbf)
        f_gp      = sample_gpp(plot_inputs[:, None], n_samples)
_min
        for axes in ax: axes.cla()
        # ax.plot(x.ravel(), y.ravel(), 'ko')
        ax[0].plot(plot_inputs, f_gp.T, color='green')
        ax[1].plot(plot_inputs, f_bnn_gpp.T, color='red')
        #ax[0].set_ylim([-5, 5])
        #ax[1].set_ylim([-5, 5])

        plt.draw()
        plt.pause(1.0/40.0)

        print("Iteration {} KL {} ".format(iter, kl(params, iter)))

    prior_params = adam(grad(kl), init_var_params(arch),
예제 #16
0
def NN_likelihood(x, y, params, arch, act, n_samples, noise=1e-1):
    """Diagonal-Gaussian density of targets y under BNN function samples."""
    sampled_fns = sample_bnn(params, x, n_samples, arch, act)  # [ns, nd]
    return diag_gaussian_density(sampled_fns, y, noise)  # y shape [ns, nd]