Code example #1
import autograd.numpy as np

# make_gp_funs is assumed to come from the accompanying Gaussian-process module;
# n_data, X and y are globals defined by the enclosing script.
def build_deep_gp(input_dimension, hidden_dimension, covariance_function):

    # GP going from input to hidden
    num_params_layer1, predict_layer1, log_marginal_likelihood_layer1 = \
        make_gp_funs(covariance_function, num_cov_params=input_dimension + 1)

    # GP going from hidden to output
    num_params_layer2, predict_layer2, log_marginal_likelihood_layer2 = \
        make_gp_funs(covariance_function, num_cov_params=hidden_dimension + 1)

    num_hidden_params = hidden_dimension * n_data
    total_num_params = num_params_layer1 + num_params_layer2 + num_hidden_params

    def unpack_all_params(all_params):
        layer1_params = all_params[:num_params_layer1]
        layer2_params = all_params[num_params_layer1:num_params_layer1+num_params_layer2]
        hiddens = all_params[num_params_layer1 + num_params_layer2:]
        return layer1_params, layer2_params, hiddens

    def combined_predict_fun(all_params, X, y, xs):
        layer1_params, layer2_params, hiddens = unpack_all_params(all_params)
        h_star_mean, h_star_cov = predict_layer1(layer1_params, X, hiddens, xs)
        y_star_mean, y_star_cov = predict_layer2(
            layer2_params, np.atleast_2d(hiddens).T, y, np.atleast_2d(h_star_mean).T)
        return y_star_mean, y_star_cov

    def log_marginal_likelihood(all_params):
        layer1_params, layer2_params, h = unpack_all_params(all_params)
        return log_marginal_likelihood_layer1(layer1_params, X, h) + \
               log_marginal_likelihood_layer2(layer2_params, np.atleast_2d(h).T, y)

    predict_layer_funcs = [predict_layer1, predict_layer2]

    return total_num_params, log_marginal_likelihood, combined_predict_fun, unpack_all_params, \
           predict_layer_funcs
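
A minimal usage sketch for build_deep_gp, assuming make_gp_funs and rbf_covariance from autograd's Gaussian-process example; the toy dataset, initialization scale, and optimizer choice are illustrative assumptions:

import autograd.numpy.random as npr
from autograd import value_and_grad
from scipy.optimize import minimize

n_data = 20
X = np.linspace(-3, 3, n_data).reshape((n_data, 1))          # hypothetical 1-D inputs
y = np.sin(X[:, 0]) + 0.1 * npr.RandomState(0).randn(n_data)

total_num_params, log_marginal_likelihood, combined_predict_fun, \
    unpack_all_params, predict_layer_funcs = build_deep_gp(1, 1, rbf_covariance)

init_params = 0.1 * npr.RandomState(1).randn(total_num_params)
objective = lambda params: -log_marginal_likelihood(params)
opt_params = minimize(value_and_grad(objective), init_params,
                      jac=True, method='BFGS').x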
Code example #2
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import value_and_grad
from scipy.optimize import minimize
from scipy.stats import norm

# make_gp_funs, rbf_covariance, init_covariance_params, expected_new_max and
# defaultmax are assumed to be defined in the accompanying Gaussian-process code.
def bayesian_optimize(func, domain_min, domain_max, num_iters=20, callback=None):

    D = len(domain_min)

    num_params, predict, log_marginal_likelihood = \
        make_gp_funs(rbf_covariance, num_cov_params=D + 1)

    model_params = init_covariance_params(num_params)

    def optimize_gp_params(init_params, X, y):
        log_hyperprior = lambda params: np.sum(norm.logpdf(params, 0., 100.))
        objective = lambda params: -log_marginal_likelihood(params, X, y) - log_hyperprior(params)
        return minimize(value_and_grad(objective), init_params, jac=True, method='CG').x

    def choose_next_point(domain_min, domain_max, acquisition_function, num_tries=15, rs=npr.RandomState(0)):
        """Uses gradient-based optimization to find next query point."""
        init_points = rs.rand(num_tries, D) * (domain_max - domain_min) + domain_min

        grad_obj = value_and_grad(lambda x: -acquisition_function(x))
        def optimize_point(init_point):
            print('.', end='')
            result = minimize(grad_obj, x0=init_point, jac=True, method='L-BFGS-B',
                              options={'maxiter': 10}, bounds=list(zip(domain_min, domain_max)))
            return result.x, acquisition_function(result.x)
        optimized_points, optimized_values = list(zip(*list(map(optimize_point, init_points))))
        print()
        best_ix = np.argmax(optimized_values)
        return np.atleast_2d(optimized_points[best_ix])


    # Start by evaluating once in the middle of the domain.
    X = np.zeros((0, D))
    y = np.zeros((0))
    X = np.concatenate((X, np.reshape((domain_max + domain_min) / 2.0, (1, D))))
    y = np.concatenate((y, np.reshape(np.array(func(X)), (1,))))

    for i in range(num_iters):
        if i > 1:
            print("Optimizing model parameters...")
            model_params = optimize_gp_params(model_params, X, y)

        print("Choosing where to look next", end='')
        def predict_func(xstar):
            mean, cov = predict(model_params, X, y, xstar)
            return mean, np.sqrt(np.diag(cov))

        def acquisition_function(xstar):
            xstar = np.atleast_2d(xstar)  # To work around a bug in scipy.minimize
            mean, std = predict_func(xstar)
            return expected_new_max(mean, std, defaultmax(y))
        next_point = choose_next_point(domain_min, domain_max, acquisition_function)

        print("Evaluating expensive function...")
        new_value = func(next_point)

        X = np.concatenate((X, next_point))
        y = np.concatenate((y, np.reshape(np.array(new_value), (1,))))

        if callback:
            callback(X, y, predict_func, acquisition_function, next_point, new_value)

    best_ix = np.argmax(y)
    return X[best_ix, :], y[best_ix]
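
A minimal call sketch; the quadratic objective below is a hypothetical stand-in for a genuinely expensive function:

def expensive_func(x):                       # hypothetical test objective
    x = np.atleast_2d(x)
    return float(-np.sum((x - 0.3) ** 2))    # maximized at x = 0.3

best_x, best_y = bayesian_optimize(expensive_func,
                                   domain_min=np.array([0.0]),
                                   domain_max=np.array([1.0]),
                                   num_iters=10)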
Code example #3
File: maxsvi.py  Project: blutooth/gp-svi
    # (tail of a black-box stochastic variational inference constructor;
    # grad comes from autograd, and gp below is the project's GP module)
    gradient = grad(variational_objective)

    return variational_objective, gradient, unpack_params



if __name__ == '__main__':

    # Specify an inference problem by its unnormalized log-posterior.
    D = 10
    dim = 1    # data dimensionality
    N = 20     # number of training points
    M = 10     # number of pseudo (inducing) points
    params = [0, -6.32795237, -0.69221531, -0.24707744]

    # Build model and objective function.
    num_params, predict, log_marginal_likelihood = \
        gp.make_gp_funs(gp.rbf_covariance, num_cov_params=D + 1)

    X, y = gp.build_toy_dataset(D=dim, n_data=N)
    pseudo, blah = gp.build_toy_dataset(D=dim, n_data=M)
    out, blah = gp.build_toy_dataset(D=dim, n_data=100)

    objective = lambda params: -log_marginal_likelihood(params, X, y)

    def log_posterior(x, inputs, len_sc, variance, t):
        N = x.shape
        sum_prob = 0
        params = [0, len_sc, variance, -0.24707744]
        for i in range(N[0]):

            """An example 2D intractable distribution:
            a Gaussian evaluated at zero with a Gaussian prior on the log-variance."""
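
For context, a sketch of how the (variational_objective, gradient, unpack_params) triple returned above is typically consumed, following autograd's black-box SVI pattern; num_weights, the initialization, and the step size are illustrative assumptions:

from autograd.misc.optimizers import adam

num_weights = dim * M                          # hypothetical variational posterior size
init_var_params = np.concatenate([np.zeros(num_weights),       # means
                                  -5 * np.ones(num_weights)])  # log-std-devs
variational_params = adam(gradient, init_var_params,
                          step_size=0.1, num_iters=2000)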
Code example #4
    # (tail of make_pinwheel_data: rs is a numpy RandomState; rate, noise_std,
    # num_per_class and spoke_angles are set earlier in the function. The example
    # assumes autograd.numpy as np plus make_gp_funs and rbf_covariance in scope.)
    x = np.linspace(0.1, 1, num_per_class)
    xs = np.concatenate([rate * x * np.cos(angle + x * rate) + noise_std * rs.randn(num_per_class)
                         for angle in spoke_angles])
    ys = np.concatenate([rate * x * np.sin(angle + x * rate) + noise_std * rs.randn(num_per_class)
                         for angle in spoke_angles])
    return np.concatenate([np.expand_dims(xs, 1), np.expand_dims(ys, 1)], axis=1)


if __name__ == '__main__':

    data_dimension = 2   # Normally the data dimension would be much higher.
    latent_dimension = 2

    # Build model and objective function.
    params_per_gp, predict, log_marginal_likelihood = \
        make_gp_funs(rbf_covariance, num_cov_params=latent_dimension + 1)
    total_gp_params = data_dimension * params_per_gp

    data = make_pinwheel_data(5, 40)
    datalen = data.shape[0]

    num_latent_params = datalen * latent_dimension

    def unpack_params(params):
        gp_params = np.reshape(params[:total_gp_params], (data_dimension, params_per_gp))
        latents   = np.reshape(params[total_gp_params:], (datalen, latent_dimension))
        return gp_params, latents

    def objective(params):
        gp_params, latents = unpack_params(params)
        # One independent GP per observed dimension, all sharing the same latents.
        gp_likelihood = sum([log_marginal_likelihood(gp_params[i], latents, data[:, i])
                             for i in range(data_dimension)])
        return -gp_likelihood  # assumed completion: the source snippet ends mid-expression
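
A sketch of how this objective might then be minimized; the initialization scale and optimizer settings are illustrative assumptions:

import autograd.numpy.random as npr
from autograd import value_and_grad
from scipy.optimize import minimize

init_params = 0.1 * npr.RandomState(0).randn(total_gp_params + num_latent_params)
result = minimize(value_and_grad(objective), init_params,
                  jac=True, method='CG', options={'maxiter': 100})
gp_params, latents = unpack_params(result.x)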