def gp_next_points(moe_experiment, method_route_name=GP_NEXT_POINTS_EPI_ROUTE_NAME, rest_host=DEFAULT_HOST, rest_port=DEFAULT_PORT, testapp=None, **kwargs):
    """Hit the rest endpoint for finding next point of highest EI at rest_host:rest_port corresponding to the method with the given experiment.

    Any extra request options (e.g., ``'covariance_info'``) may be supplied
    through ``kwargs``; explicitly supplied ``'gp_historical_info'`` or
    ``'domain_info'`` keys take precedence over the experiment's own payload.

    """
    endpoint = ALL_REST_ROUTES_ROUTE_NAME_TO_ENDPOINT[method_route_name]

    # Caller-supplied options form the base payload; fill any missing
    # required sections from the experiment itself.
    payload = dict(kwargs)
    experiment_payload = moe_experiment.build_json_payload()
    for section in ('gp_historical_info', 'domain_info'):
        payload.setdefault(section, experiment_payload.get(section))

    json_response = call_endpoint_with_payload(rest_host, rest_port, endpoint, json.dumps(payload), testapp)
    return GpNextPointsResponse().deserialize(json_response)["points_to_sample"]
def test_all_constant_liar_methods_function(self):
    """Test that each constant liar ``lie_method`` runs to completion. This is an integration test."""
    num_to_sample = 2
    for python_domain, python_gp in self.gp_test_environments:
        python_cov, historical_data = python_gp.get_core_data_copy()

        for lie_method in CONSTANT_LIAR_METHODS:
            payload = self._build_json_payload(
                python_domain,
                python_cov,
                historical_data,
                num_to_sample,
                lie_method=lie_method,
            )
            response = self.testapp.post(GP_NEXT_POINTS_CONSTANT_LIAR_ENDPOINT, payload)
            result = GpNextPointsResponse().deserialize(json.loads(response.body))

            # The endpoint must return the requested number of points, each
            # with the domain's dimension, plus a status with a valid EI.
            assert 'points_to_sample' in result
            points = result['points_to_sample']
            assert len(points) == num_to_sample
            assert len(points[0]) == python_gp.dim
            assert 'status' in result
            assert 'expected_improvement' in result['status']
            assert result['status']['expected_improvement'] >= 0.0
def test_interface_returns_same_as_cpp(self):
    """Integration test for the /gp/next_points/* endpoints.

    For every next-points route, test environment, and ``num_to_sample`` in
    (1, 2, 4), POST a request and verify that the response deserializes, has
    the requested number of points of the right dimension, and reports a
    non-negative expected improvement.

    """
    for moe_route in ALL_NEXT_POINTS_MOE_ROUTES:
        for test_case in self.gp_test_environments:
            for num_to_sample in (1, 2, 4):
                python_domain, python_gp = test_case
                python_cov, historical_data = python_gp.get_core_data_copy()

                # Next point from REST; some routes need extra options.
                if moe_route.route_name == GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME:
                    json_payload = self._build_json_payload(python_domain, python_cov, historical_data, num_to_sample, lie_value=0.0)
                elif moe_route.route_name == GP_NEXT_POINTS_EPI_ROUTE_NAME and num_to_sample > 1:
                    json_payload = self._build_json_payload(python_domain, python_cov, historical_data, num_to_sample, l_bfgs_b=True)
                else:
                    json_payload = self._build_json_payload(python_domain, python_cov, historical_data, num_to_sample)

                resp = self.testapp.post(moe_route.endpoint, json_payload)
                resp_schema = GpNextPointsResponse()
                resp_dict = resp_schema.deserialize(json.loads(resp.body))

                # Plain asserts for consistency with the other tests in this
                # file (replacing the legacy testify ``T.assert_*`` helpers).
                assert 'points_to_sample' in resp_dict
                assert len(resp_dict['points_to_sample']) == num_to_sample
                assert len(resp_dict['points_to_sample'][0]) == python_gp.dim
                assert 'status' in resp_dict
                assert 'expected_improvement' in resp_dict['status']
                assert resp_dict['status']['expected_improvement'] >= 0.0
class GpNextPointsPrettyView(OptimizableGpPrettyView):

    """A class to encapsulate 'pretty' ``gp_next_points_*`` views; e.g., :class:`moe.views.rest.gp_next_points_epi.GpNextPointsEpi`.

    Extends :class:`moe.views.optimizable_gp_pretty_view.GpPrettyView` with:

        1. gaussian_process generation from params
        2. Converting params into a C++ consumable set of optimizer parameters
        3. A method (compute_next_points_to_sample_response) for computing the next best points to sample from a gaussian_process

    """

    # Colander schemas used to deserialize requests and serialize responses.
    request_schema = GpNextPointsRequest()
    response_schema = GpNextPointsResponse()

    # Example request shown by the 'pretty' view: one point from a 1D unit domain.
    _pretty_default_request = {
        "num_to_sample": 1,
        "gp_historical_info": GpPrettyView._pretty_default_gp_historical_info,
        "domain_info": {
            "dim": 1,
            "domain_bounds": [
                {
                    "min": 0.0,
                    "max": 1.0,
                },
            ],
        },
    }

    def compute_next_points_to_sample_response(self, params, optimizer_method_name, route_name, *args, **kwargs):
        """Compute the next points to sample (and their expected improvement) using optimizer_method_name from params in the request.

        .. Warning:: Attempting to find ``num_to_sample`` optimal points with
          ``num_sampled < num_to_sample`` historical points sampled can cause matrix issues under
          some conditions. Try requesting ``num_to_sample < num_sampled`` points for better
          performance. To bootstrap more points try sampling at random, or from a grid.

        :param request_params: the deserialized REST request, containing ei_optimizer_parameters and gp_historical_info
        :type request_params: a deserialized self.request_schema object as a dict
        :param optimizer_method_name: the optimization method to use
        :type optimizer_method_name: string in :const:`moe.views.constant.NEXT_POINTS_OPTIMIZER_METHOD_NAMES`
        :param route_name: name of the route being called
        :type route_name: string in :const:`moe.views.constant.ALL_REST_ROUTES_ROUTE_NAME_TO_ENDPOINT`
        :param ``*args``: extra args to be passed to optimization method
        :param ``**kwargs``: extra kwargs to be passed to optimization method

        """
        points_being_sampled = numpy.array(params.get('points_being_sampled'))
        num_to_sample = params.get('num_to_sample')
        num_mc_iterations = params.get('mc_iterations')
        max_num_threads = params.get('max_num_threads')

        gaussian_process = _make_gp_from_params(params)

        ei_opt_status = {}
        # TODO(GH-89): Make the optimal_learning library handle this case 'organically' with
        # reasonable default behavior and remove hacks like this one.
        if gaussian_process.num_sampled == 0:
            # If there is no initial data we bootstrap with random points
            py_domain = _make_domain_from_params(params, python_version=True)
            next_points = py_domain.generate_uniform_random_points_in_domain(num_to_sample)
            ei_opt_status['found_update'] = True
            expected_improvement_evaluator = PythonExpectedImprovement(
                gaussian_process,
                points_being_sampled=points_being_sampled,
                num_mc_iterations=num_mc_iterations,
            )
        else:
            # Calculate the next best points to sample given the historical data
            optimizer_class, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

            # L-BFGS-B runs through the pure-Python EI implementation (with a
            # RepeatedDomain for q points); every other optimizer goes through
            # the C++ wrappers.
            if optimizer_class == python_optimization.LBFGSBOptimizer:
                domain = RepeatedDomain(num_to_sample, _make_domain_from_params(params, python_version=True))
                expected_improvement_evaluator = PythonExpectedImprovement(
                    gaussian_process,
                    points_being_sampled=points_being_sampled,
                    num_mc_iterations=num_mc_iterations,
                    mvndst_parameters=_make_mvndst_parameters_from_params(params),
                )
                opt_method = getattr(moe.optimal_learning.python.python_version.expected_improvement, optimizer_method_name)
            else:
                domain = _make_domain_from_params(params, python_version=False)
                expected_improvement_evaluator = ExpectedImprovement(
                    gaussian_process,
                    points_being_sampled=points_being_sampled,
                    num_mc_iterations=num_mc_iterations,
                )
                opt_method = getattr(moe.optimal_learning.python.cpp_wrappers.expected_improvement, optimizer_method_name)

            expected_improvement_optimizer = optimizer_class(
                domain,
                expected_improvement_evaluator,
                optimizer_parameters,
                num_random_samples=num_random_samples,
            )

            with timing_context(EPI_OPTIMIZATION_TIMING_LABEL):
                next_points = opt_method(
                    expected_improvement_optimizer,
                    params.get('optimizer_info')['num_multistarts'],  # optimizer_parameters.num_multistarts,
                    num_to_sample,
                    *args,
                    max_num_threads=max_num_threads,
                    status=ei_opt_status,
                    **kwargs
                )

        # TODO(GH-285): Use analytic q-EI here
        # TODO(GH-314): Need to resolve potential issue with NaNs before using q-EI here
        # It may be sufficient to check found_update == False in ei_opt_status
        # and then use q-EI, else set EI = 0.
        expected_improvement_evaluator.current_point = next_points
        # The C++ may fail to compute EI with some ``next_points`` inputs (e.g.,
        # ``points_to_sample`` and ``points_being_sampled`` are too close
        # together or too close to ``points_sampled``). We catch the exception when this happens
        # and attempt a more numerically robust option.
        try:
            expected_improvement = expected_improvement_evaluator.compute_expected_improvement()
        except Exception as exception:
            # NOTE: ``{0!s}`` (str conversion) instead of the former ``{0:s}``
            # format spec -- passing an Exception to a ':s' spec raises
            # TypeError on Python 3 (object.__format__ rejects non-empty
            # specs), which would mask the original failure.
            self.log.info('EI computation failed, probably b/c GP-variance matrix is singular. Error: {0!s}'.format(exception))
            # ``_compute_expected_improvement_monte_carlo`` in
            # :class:`moe.optimal_learning.python.python_version.expected_improvement.ExpectedImprovement`
            # has a more reliable (but very expensive) way to deal with singular variance matrices.
            python_ei_eval = PythonExpectedImprovement(
                expected_improvement_evaluator._gaussian_process,
                points_to_sample=next_points,
                points_being_sampled=points_being_sampled,
                num_mc_iterations=num_mc_iterations,
            )
            expected_improvement = python_ei_eval.compute_expected_improvement(force_monte_carlo=True)

        return self.form_response({
            'endpoint': route_name,
            'points_to_sample': next_points.tolist(),
            'status': {
                'expected_improvement': expected_improvement,
                'optimizer_success': ei_opt_status,
            },
        })