Example #1
0
    def callback(bnn_params, dsc_params, iter, gen_gradient, dsc_gradient):
        """Per-iteration hook: draw functions from the GP prior, the fitted
        BNN prior, and a standard-normal BNN prior; plot them side by side
        when plot_during is set, then log the iteration number.
        """
        num_fns = 3
        num_pts = 500
        xs = np.linspace(-8, 8, num=num_pts).reshape(num_pts, 1)
        # Variational params of a standard-normal weight prior, for reference.
        reference_params = init_var_params(bnn_arch, scale_mean=0, scale=1)

        fitted_draws = sample_bnn(bnn_params, xs, num_fns, bnn_arch, act)
        gp_draws = sample_gpp(xs, num_fns, ker)
        reference_draws = sample_bnn(reference_params, xs, num_fns, bnn_arch,
                                     act)

        if plot_during:
            for panel in ax:
                panel.cla()

            ax[0].plot(xs, gp_draws.T, color='green')
            ax[1].plot(xs, fitted_draws.T, color='red')
            ax[2].plot(xs, reference_draws.T, color='blue')

            plt.draw()
            plt.pause(1.0 / 40.0)

        print("Iteration {} ".format(iter))
Example #2
0
    def callback(bnn_params, dsc_params, iter, gen_gradient, dsc_gradient):
        """Per-iteration hook: sample functions from the GP prior, the fitted
        BNN prior, and a standard-normal BNN prior, plotting each batch on its
        own panel when plot_during is enabled.
        """
        draws = 3
        grid = np.linspace(-8, 8, num=100).reshape(100, 1)
        # Standard-normal variational parameters used as a fixed reference.
        reference_params = init_var_params(bnn_layer_sizes,
                                           scale_mean=0,
                                           scale=1)

        fitted_fns = sample_bnn(bnn_params, grid, draws, bnn_layer_sizes)
        gp_fns = sample_gpp(grid, draws)
        reference_fns = sample_bnn(reference_params, grid, draws,
                                   bnn_layer_sizes)

        # Plot samples of functions from the bnn and gp priors.
        if plot_during:
            for panel in ax:
                panel.cla()

            ax[0].plot(grid, gp_fns, color='green')
            ax[1].plot(grid, fitted_fns, color='red')
            ax[2].plot(grid, reference_fns, color='blue')

            plt.draw()
            plt.pause(1.0 / 40.0)

        print("Iteration {} ".format(iter))
Example #3
0
def plot_save_priors_fdensity(bnn_arch=[1, 20, 1], bnn_act='rbf'):
    """Plot marginal function densities of a standard-normal BNN prior and a
    GP prior over a fixed 1-D input grid.

    Args:
        bnn_arch: BNN layer sizes. (Kept as a list default for backward
            compatibility; it is only read here, never mutated.)
        bnn_act: activation name forwarded to sample_bnn.
    """
    plot_inputs = np.linspace(-10, 10, num=500)[:, None]
    std_norm_param = init_var_params(bnn_arch, scale_mean=0, scale=1)
    f_bnn = sample_bnn(std_norm_param, plot_inputs, 25, bnn_arch, bnn_act)
    f_gps = sample_gpp(plot_inputs, 25)
    plot_density(f_bnn)
    # "gpp" routes the GP samples to the GP-prior figure/filename.
    plot_density(f_gps, plot="gpp")
Example #4
0
def plot_save_priors_functions(bnn_arch=[1, 20, 1], bnn_act='rbf'):
    """Plot sample functions drawn from a standard-normal BNN prior and from
    a GP prior over a fixed 1-D input grid.

    Args:
        bnn_arch: BNN layer sizes. (Kept as a list default for backward
            compatibility; it is only read here, never mutated.)
        bnn_act: activation name forwarded to sample_bnn.
    """
    plot_inputs = np.linspace(-10, 10, num=500)[:, None]
    std_norm_param = init_var_params(bnn_arch, scale_mean=0, scale=1)
    f_bnn = sample_bnn(std_norm_param, plot_inputs, 3, bnn_arch, bnn_act)
    f_gps = sample_gpp(plot_inputs, 3)
    plot_samples(plot_inputs, f_bnn.T)
    # "gpp" routes the GP samples to the GP-prior figure/filename.
    plot_samples(plot_inputs, f_gps.T, plot="gpp")
Example #5
0
def outer_objective(params_prior, n_data, n_samples, layer_sizes, act=rbf):
    """Outer objective for fitting BNN prior parameters.

    Draws GP functions on a fixed grid, fits a variational posterior q over
    BNN weights to them by maximizing the NN likelihood, and returns the
    negative mean ELBO-style estimate log p_nn + log p(w) - log q(w).
    """
    inputs = np.linspace(0, 10, num=n_data).reshape(n_data, 1)
    gp_draws = sample_gpp(inputs, n_samples=n_samples, kernel='rbf')

    def neg_likelihood(params_q, t):
        # Inner objective: fit q so the BNN explains the GP draws (t unused).
        return -np.mean(NN_likelihood(inputs, gp_draws, params_q,
                                      layer_sizes, act))

    fitted_q = adam(grad(neg_likelihood), init_var_params(layer_sizes),
                    step_size=0.1, num_iters=200)

    weights = sample_weights(fitted_q, n_samples)
    log_lik = NN_likelihood(inputs, gp_draws, fitted_q, layer_sizes, act)
    log_prior = diag_gaussian_log_density(weights, params_prior[0],
                                          params_prior[1])
    log_q = diag_gaussian_log_density(weights, fitted_q[0], fitted_q[1])

    return -np.mean(log_lik + log_prior - log_q)
Example #6
0
def outer_objective(params_prior, n_data, n_samples, layer_sizes, act, ker='rbf'):
    """Importance-weighted outer objective for the BNN prior parameters.

    Fits a variational posterior q over BNN weights to GP function draws,
    forms weights p(w) * p_nn / q(w), and returns the mean of their logs.

    NOTE(review): densities are multiplied in linear space and only logged at
    the end; for high-dimensional weight vectors this is prone to
    under/overflow — a log-space formulation would be safer. Confirm intent.
    NOTE(review): np.mean(np.log(iwae)) averages per-sample log-weights
    (an ELBO-style estimate), not log-mean-exp as the name "iwae" suggests —
    verify which bound is intended.
    """

    # x = np.random.uniform(low=0, high=10, size=(n_data, 1))

    x = np.linspace(0, 10, num=n_data).reshape(n_data, 1)
    # GP function draws at the grid points, used as fitting targets.
    fgp = sample_gpp(x, n_samples=n_samples, kernel=ker)

    def objective_inner(params_q, t):
        # Inner fit: maximize NN likelihood of the GP draws under q (t unused).
        return -np.mean(NN_likelihood(x, fgp, params_q, layer_sizes, act, n_samples))

    op_params_q = adam(grad(objective_inner), init_var_params(layer_sizes),
                       step_size=0.1, num_iters=100)

    wq = sample_weights(op_params_q, n_samples) # [ns, nw]
    pnn = NN_likelihood(x, fgp, op_params_q, layer_sizes, act, n_samples)
    # Prior and variational densities of the sampled weights (linear space).
    p = diag_gaussian_density(wq, params_prior[0], params_prior[1])
    q = diag_gaussian_density(wq, op_params_q[0], op_params_q[1])

    iwae = p*pnn/q

    # print(iwae.shape)

    return np.mean(np.log(iwae))
Example #7
0
def init_params(arch):
    """Return [variational BNN params, 2 extra params, small noise scalar]."""
    variational = init_var_params(arch)
    extras = rs.randn(2)
    noise_scale = 1e-5 * rs.randn()
    return [variational, extras, noise_scale]
Example #8
0
                           bnn_layer_sizes)
        # NOTE(review): fragment — the line above is the tail of a sample_bnn
        # call whose start is cut off in this excerpt.

        # Plot samples of functions from the bnn and gp priors.
        if plot_during:
            for axes in ax:
                axes.cla()

            # ax.plot(x.ravel(), y.ravel(), 'ko')
            ax[0].plot(plot_inputs, f_gp, color='green')
            ax[1].plot(plot_inputs, f_bnn_gpp, color='red')
            ax[2].plot(plot_inputs, f_bnn, color='blue')

            plt.draw()
            plt.pause(1.0 / 40.0)

        print("Iteration {} ".format(iter))

    # INITIALIZE THE PARAMETERS
    init_gen_params = init_var_params(bnn_layer_sizes, scale=-1.5)

    # OPTIMIZE
    grad_gan = grad(objective)

    # NOTE(review): this adam is called with both generator and discriminator
    # params plus step_size_max/min — presumably a minimax variant defined
    # elsewhere, not the standard single-objective adam. Confirm against the
    # optimizer's definition.
    optimized_params = adam(grad_gan,
                            init_gen_params,
                            init_dsc_params,
                            step_size_max=step_size_max,
                            step_size_min=step_size_min,
                            num_iters=num_epochs,
                            callback=callback)
Example #9
0
    def callback(params, iter, g):
        """Plot fresh BNN-prior draws (current params) against GP-prior draws
        and report the running KL estimate.

        NOTE(review): the original text was paste-mangled here (a mis-indented
        assignment and a stray "_min" token made it a SyntaxError);
        reconstructed from the surrounding statements.
        """
        n_samples = 3
        f_bnn_gpp = sample_bnn(params, plot_inputs[:, None], n_samples, arch,
                               rbf)
        f_gp = sample_gpp(plot_inputs[:, None], n_samples)

        for axes in ax:
            axes.cla()
        ax[0].plot(plot_inputs, f_gp.T, color='green')
        ax[1].plot(plot_inputs, f_bnn_gpp.T, color='red')

        plt.draw()
        plt.pause(1.0/40.0)

        print("Iteration {} KL {} ".format(iter, kl(params, iter)))

    # Fit the prior parameters by gradient descent on the KL estimate.
    prior_params = adam(grad(kl), init_var_params(arch),
                        step_size=0.05, num_iters=100, callback=callback)









Example #10
0
        if plot_during:
            # NOTE(review): fragment — f_gp, f_bnn_gpp, f_bnn and plot_inputs
            # are computed earlier in a callback whose start is cut off here.
            for axes in ax:
                axes.cla()

            # ax.plot(x.ravel(), y.ravel(), 'ko')
            ax[0].plot(plot_inputs, f_gp.T, color='green')
            ax[1].plot(plot_inputs, f_bnn_gpp.T, color='red')
            ax[2].plot(plot_inputs, f_bnn.T, color='blue')
            #ax[0].set_ylim([-3,3])
            #ax[1].set_ylim([-3,3])
            #ax[2].set_ylim([-3,3])

            plt.draw()
            plt.pause(1.0 / 40.0)

        print("Iteration {} ".format(iter))

    # Initialize generator (BNN prior) and discriminator parameters.
    init_gen_params = init_var_params(bnn_arch, scale=-1.5)
    init_dsc_params = init_random_params(dsc_arch)

    # OPTIMIZE
    # Gradients w.r.t. both generator and discriminator parameters.
    grad_gan = grad(objective, argnum=(0, 1))

    optimized_params = adam_minimax(grad_gan,
                                    init_gen_params,
                                    init_dsc_params,
                                    step_size_max=0.001,
                                    step_size_min=0.001,
                                    num_iters=200,
                                    callback=callback)
Example #11
0
    def kl(prior_params, t):
        """Adam objective: estimated KL between priors (step index t unused)."""
        estimate = kl_estimate(prior_params, arch, n_data, n_samples, act, ker)
        return estimate

    def callback(params, iter, g):
        """Plot current BNN-prior draws next to GP-prior draws, log the KL."""
        num_draws = 3
        grid = np.linspace(-10, 10, num=500)

        bnn_fns = sample_bnn(params, grid[:, None], num_draws, arch, act)
        gp_fns = sample_gpp(grid[:, None], num_draws, kernel=ker)

        for panel in ax:
            panel.cla()
        ax[0].plot(grid, gp_fns.T, color='green')
        ax[1].plot(grid, bnn_fns.T, color='red')

        plt.draw()
        plt.pause(1.0 / 40.0)

        print("Iteration {} KL {} ".format(iter, kl(params, iter)))

    # Fit the prior parameters: minimize the KL estimate with Adam starting
    # from freshly initialized variational parameters, plotting via callback.
    prior_params = adam(grad(kl),
                        init_var_params(arch),
                        step_size=0.04,
                        num_iters=100,
                        callback=callback)