def gen_sample_from_qei(cpp_gp,
                        cpp_search_domain,
                        sgd_params,
                        num_to_sample,
                        num_mc=1e4,
                        lhc_itr=2e4):
    """Optimize q-EI over the search domain and return the best candidate batch.

    :param cpp_gp: trained cpp version of GaussianProcess model
    :param cpp_search_domain: cpp version of TensorProductDomain
    :param sgd_params: GradientDescentParameters
    :param num_to_sample: number of points to sample for the next iteration
    :param num_mc: number of Monte Carlo iterations
    :param lhc_itr: number of points used in latin hypercube search
    :return: (points to sample next, expected improvement at this set of points)
    """
    cpp_ei_evaluator = cppExpectedImprovement(gaussian_process=cpp_gp,
                                              num_mc_iterations=int(num_mc))
    optimizer = cppGradientDescentOptimizer(cpp_search_domain,
                                            cpp_ei_evaluator, sgd_params,
                                            int(lhc_itr))
    points_to_sample_list = []
    ei_list = []
    # Multistart gradient-descent optimization of q-EI; the None argument
    # lets the optimizer generate its own random starting points.
    points_to_sample_list.append(
        multistart_expected_improvement_optimization(optimizer,
                                                     None,
                                                     num_to_sample,
                                                     use_gpu=False,
                                                     which_gpu=0,
                                                     max_num_threads=8))

    # Evaluate EI at the returned batch so the caller gets its value too.
    cpp_ei_evaluator.set_current_point(points_to_sample_list[0])
    ei_list.append(cpp_ei_evaluator.compute_expected_improvement())
    # Only one candidate is produced, but argmax/amax keeps the return
    # pattern consistent with the sibling sampling helpers in this module.
    return points_to_sample_list[numpy.argmax(ei_list)], numpy.amax(ei_list)
# Exemplo n.º 2
# 0
def gen_sample_from_qkg_mcmc(cpp_gp_mcmc, cpp_gp_list, inner_optimizer, cpp_search_domain, num_fidelity,
                             discrete_pts_list, sgd_params, num_to_sample, num_mc=10, lhc_itr=1e3):
    """Optimize q-KG under MCMC hyperparameter samples and return the best batch.

    :param cpp_gp_mcmc: trained cpp version of GaussianProcess MCMC model
    :param cpp_gp_list: list of cpp GaussianProcess models (presumably one per
        MCMC hyperparameter sample -- confirm against caller)
    :param inner_optimizer: optimizer used for the inner optimization of the
        knowledge gradient evaluator
    :param cpp_search_domain: cpp version of TensorProductDomain
    :param num_fidelity: number of fidelity control parameters
    :param discrete_pts_list: list of discretization point arrays; the first
        entry's row count is used as num_pts for the multistart optimization
    :param sgd_params: GradientDescentParameters
    :param num_to_sample: number of points to sample for the next iteration
    :param num_mc: number of Monte Carlo iterations
    :param lhc_itr: number of points used in latin hypercube search
    :return: (points to sample next, knowledge gradient at this set of points)
    """
    cpp_kg_evaluator = cppKnowledgeGradientMCMC(gaussian_process_mcmc=cpp_gp_mcmc,
                                                gaussian_process_list=cpp_gp_list,
                                                num_fidelity=num_fidelity,
                                                inner_optimizer=inner_optimizer,
                                                discrete_pts_list=discrete_pts_list,
                                                num_to_sample=num_to_sample,
                                                num_mc_iterations=int(num_mc))
    optimizer = cppGradientDescentOptimizer(cpp_search_domain, cpp_kg_evaluator, sgd_params, int(lhc_itr))
    points_to_sample_list = []
    kg_list = []

    # Multistart optimization of q-KG; the None argument lets the optimizer
    # generate its own starting points.
    points_to_sample_list.append(multistart_knowledge_gradient_mcmc_optimization(optimizer, inner_optimizer, None, discrete_pts_list,
                                                                                 num_to_sample=num_to_sample,
                                                                                 num_pts=discrete_pts_list[0].shape[0],
                                                                                 max_num_threads=8))

    # Evaluate KG at the returned batch so the caller gets its value too.
    cpp_kg_evaluator.set_current_point(points_to_sample_list[0])
    kg_list.append(cpp_kg_evaluator.compute_objective_function())
    # Single candidate; argmax/amax mirrors the sibling sampling helpers.
    return points_to_sample_list[numpy.argmax(kg_list)], numpy.amax(kg_list)
def gen_sample_from_qei_mcmc(cpp_gp_mcmc,
                             cpp_search_domain,
                             sgd_params,
                             start_points,
                             num_to_sample,
                             num_mc=1e4,
                             lhc_itr=2e4):
    """Optimize q-EI under MCMC hyperparameter samples and return the best batch.

    :param cpp_gp_mcmc: trained cpp version of GaussianProcess MCMC model
    :param cpp_search_domain: cpp version of TensorProductDomain
    :param sgd_params: GradientDescentParameters
    :param start_points: 2-d array of starting points for the multistart
        optimization; one multistart per row
    :param num_to_sample: number of points to sample for the next iteration
    :param num_mc: number of Monte Carlo iterations
    :param lhc_itr: number of points used in latin hypercube search
    :return: (points to sample next, expected improvement at this set of points)
    """
    cpp_ei_evaluator = cppExpectedImprovementMCMC(
        gaussian_process_mcmc=cpp_gp_mcmc,
        num_to_sample=num_to_sample,
        num_mc_iterations=int(num_mc))
    optimizer = cppGradientDescentOptimizer(cpp_search_domain,
                                            cpp_ei_evaluator, sgd_params,
                                            int(lhc_itr))
    points_to_sample_list = []
    ei_list = []

    # NOTE(review): num_to_sample is hard-coded to 1 here even though the
    # evaluator above was built with the caller's num_to_sample -- confirm
    # this asymmetry is intentional before changing it.
    points_to_sample_list.append(
        multistart_expected_improvement_mcmc_optimization(
            ei_optimizer=optimizer,
            num_multistarts=start_points.shape[0],
            start_point_set=start_points,
            num_to_sample=1,
            max_num_threads=1))
    # Evaluate EI at the returned batch so the caller gets its value too.
    cpp_ei_evaluator.set_current_point(points_to_sample_list[0])
    ei_list.append(cpp_ei_evaluator.compute_objective_function())
    # Single candidate; argmax/amax mirrors the sibling sampling helpers.
    return points_to_sample_list[numpy.argmax(ei_list)], numpy.amax(ei_list)
# Exemplo n.º 4
# 0
            )

            test = np.zeros(eval_pts.shape[0])
            ps_evaluator = PosteriorMean(cpp_gp, num_fidelity)
            for i, pt in enumerate(eval_pts):
                ps_evaluator.set_current_point(
                    pt.reshape(
                        (1, cpp_gp_loglikelihood.dim - objective_func._num_fidelity)
                    )
                )
                test[i] = -ps_evaluator.compute_objective_function()

            initial_point = eval_pts[np.argmin(test)]

            ps_sgd_optimizer = cppGradientDescentOptimizer(
                cpp_inner_search_domain, ps_evaluator, cpp_sgd_params_ps
            )
            report_point = posterior_mean_optimization(
                ps_sgd_optimizer, initial_guess=initial_point, max_num_threads=4
            )

            ps_evaluator.set_current_point(
                report_point.reshape(
                    (1, cpp_gp_loglikelihood.dim - objective_func._num_fidelity)
                )
            )
            if -ps_evaluator.compute_objective_function() > np.min(test):
                report_point = initial_point

            discrete_pts_optima = np.reshape(
                np.append(discrete_pts_optima, report_point),
# Exemplo n.º 5
# 0
# Build the hyperparameter search domain: bounds for the 2 + dim base kernel
# hyperparameters, plus one extra bound per observed derivative (copied from
# the objective function's declared hyperparameter domain).
hyper_domain = np.zeros((2 + dim + len(derivatives), 2))
hyper_domain[:(2 + dim), :] = objective_func._hyper_domain[:(2 + dim), :]
for g, deriv in enumerate(derivatives):
    hyper_domain[g + 2 + dim, :] = \
        objective_func._hyper_domain[deriv + 2 + dim, :]
hyper_search_domain = cppTensorProductDomain(
    [ClosedInterval(bound[0], bound[1]) for bound in hyper_domain])
hyper_params = np.ones(2 + dim + len(derivatives))

# GP built from all hyperparameters past index objective_func._dim
# (presumably the leading entries are noise-related -- confirm upstream).
cpp_gp = cppGaussianProcess(cpp_cov, hyper_params[(1 + objective_func._dim):],
                            init_data, derivatives)

# Fit hyperparameters by multistart SGD on the GP log-likelihood.
cpp_gp_loglikelihood = cppGaussianProcessLogLikelihood(
    cpp_cov, cpp_gp.get_historical_data_copy(), noise_variance, derivatives)
sgd_optimizer = cppGradientDescentOptimizer(hyper_search_domain,
                                            cpp_gp_loglikelihood,
                                            cpp_sgd_params_hyper)
hyper_params = multistart_hyperparameter_optimization(
    log_likelihood_optimizer=sgd_optimizer,
    num_multistarts=None,
    max_num_threads=8)

# Candidate discretization: uniform random points refined toward the optima.
discrete_pts_optima = python_search_domain.generate_uniform_random_points_in_domain(
    1000)

points = sample_from_global_optima(cpp_gp, 1000, objective_func._search_domain,
                                   discrete_pts_optima, 200)
# Parenthesized prints keep Python 2 output identical while being valid
# Python 3 syntax (the bare `print x` form was Python-2-only).
print(points)
print(cpp_gp.compute_mean_of_points(points))