Example #1
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        test_case = self.gp_test_environments[0]

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(self._build_json_payload(python_domain, python_cov, historical_data))

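        # pyramid's DummyRequest does not deserialize the POST body, so json_body
        # (which get_params_from_request() reads) is set explicitly below.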
        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
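        # _make_optimizer_parameters_from_params returns
        # (optimizer_class, optimizer_parameters, num_random_samples);
        # the optimizer class itself is not needed by this test.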
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        assert optimizer_parameters.num_multistarts == TEST_OPTIMIZER_MULTISTARTS
        assert optimizer_parameters._python_max_num_steps == TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps

        # Test arbitrary parameters get passed through
        json_payload['optimizer_info']['num_multistarts'] = TEST_OPTIMIZER_MULTISTARTS + 5
        json_payload['optimizer_info']['optimizer_parameters']['max_num_steps'] = TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        assert optimizer_parameters.num_multistarts == TEST_OPTIMIZER_MULTISTARTS + 5

        assert optimizer_parameters._python_max_num_steps == TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10
Example #2
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        # TODO(GH-305): turn this into a unit test by going through OptimizableGpPrettyView
        # and mocking out dependencies (instead of awkwardly constructing a more complex object).
        test_case = self.gp_test_environments[0]
        num_to_sample = 1

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(
            self._build_json_payload(python_domain, python_cov,
                                     historical_data, num_to_sample))

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpNextPointsPrettyView(request)
        # get_params_from_request() requires this field to be set; the value is arbitrary for now.
        # TODO(GH-305): mock out this and other members
        view._route_name = GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(
            params)

        T.assert_equal(
            optimizer_parameters.num_multistarts,
            TEST_OPTIMIZER_MULTISTARTS,
        )

        T.assert_equal(
            optimizer_parameters._python_max_num_steps,
            TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps,
        )

        # Test arbitrary parameters get passed through
        json_payload['optimizer_info']['num_multistarts'] = TEST_OPTIMIZER_MULTISTARTS + 5
        json_payload['optimizer_info']['optimizer_parameters']['max_num_steps'] = TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpNextPointsPrettyView(request)
        # get_params_from_request() requires this field to be set; the value is arbitrary for now.
        # TODO(GH-305): mock out this and other members
        view._route_name = GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(
            params)

        T.assert_equal(
            optimizer_parameters.num_multistarts,
            TEST_OPTIMIZER_MULTISTARTS + 5,
        )

        T.assert_equal(
            optimizer_parameters._python_max_num_steps,
            TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10,
        )
Example #3
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        # TODO(GH-305): turn this into a unit test by going through OptimizableGpPrettyView
        # and mocking out dependencies (instead of awkwardly constructing a more complex object).
        test_case = self.gp_test_environments[0]
        num_to_sample = 1

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(self._build_json_payload(python_domain, python_cov, historical_data, num_to_sample))

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpNextPointsPrettyView(request)
        # get_params_from_request() requires this field to be set; the value is arbitrary for now.
        # TODO(GH-305): mock out this and other members
        view._route_name = GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        T.assert_equal(
                optimizer_parameters.num_multistarts,
                TEST_OPTIMIZER_MULTISTARTS,
                )

        T.assert_equal(
                optimizer_parameters._python_max_num_steps,
                TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps,
                )

        # Test arbitrary parameters get passed through
        json_payload['optimizer_info']['num_multistarts'] = TEST_OPTIMIZER_MULTISTARTS + 5
        json_payload['optimizer_info']['optimizer_parameters']['max_num_steps'] = TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpNextPointsPrettyView(request)
        # get_params_from_request() requires this field to be set; the value is arbitrary for now.
        # TODO(GH-305): mock out this and other members
        view._route_name = GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        T.assert_equal(
                optimizer_parameters.num_multistarts,
                TEST_OPTIMIZER_MULTISTARTS + 5,
                )

        T.assert_equal(
                optimizer_parameters._python_max_num_steps,
                TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10,
                )
Example #4
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        test_case = self.gp_test_environments[0]

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(
            self._build_json_payload(python_domain, python_cov,
                                     historical_data))

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(
            params)

        T.assert_equal(
            optimizer_parameters.num_multistarts,
            TEST_OPTIMIZER_MULTISTARTS,
        )

        T.assert_equal(
            optimizer_parameters._python_max_num_steps,
            TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps,
        )

        # Test arbitrary parameters get passed through
        json_payload['optimizer_info']['num_multistarts'] = TEST_OPTIMIZER_MULTISTARTS + 5
        json_payload['optimizer_info']['optimizer_parameters']['max_num_steps'] = TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(
            params)

        T.assert_equal(
            optimizer_parameters.num_multistarts,
            TEST_OPTIMIZER_MULTISTARTS + 5,
        )

        T.assert_equal(
            optimizer_parameters._python_max_num_steps,
            TEST_GRADIENT_DESCENT_PARAMETERS.max_num_steps + 10,
        )
Example #5
    def gp_hyper_opt_view(self):
        """Endpoint for gp_hyper_opt POST requests.

        .. http:post:: /gp/hyper_opt

           Calculates the optimal hyperparameters for a Gaussian process, given historical data.

           :input: :class:`moe.views.schemas.rest.gp_hyper_opt.GpHyperOptRequest`
           :output: :class:`moe.views.schemas.rest.gp_hyper_opt.GpHyperOptResponse`

           :status 200: returns a response
           :status 500: server error
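
           A hedged example request body (the values below are placeholders;
           the full schema, including ``gp_historical_info`` and
           ``hyperparameter_domain_info``, is defined by the request class
           above)::

               {
                   "optimizer_info": {
                       "num_multistarts": 100,
                       "optimizer_parameters": {"max_num_steps": 500}
                   }
               }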

        """
        params = self.get_params_from_request()

        max_num_threads = params.get('max_num_threads')
        hyperparameter_domain = _make_domain_from_params(params, domain_info_key='hyperparameter_domain_info')
        gaussian_process = _make_gp_from_params(params)
        covariance_of_process, historical_data = gaussian_process.get_core_data_copy()
        optimizer_class, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)
        log_likelihood_type = params.get('log_likelihood_info')

        log_likelihood_eval = LOG_LIKELIHOOD_TYPES_TO_LOG_LIKELIHOOD_METHODS[log_likelihood_type].log_likelihood_class(
            covariance_of_process,
            historical_data,
        )

        log_likelihood_optimizer = optimizer_class(
            hyperparameter_domain,
            log_likelihood_eval,
            optimizer_parameters,
            num_random_samples=num_random_samples,
        )

        hyperopt_status = {}
        with timing_context(MODEL_SELECTION_TIMING_LABEL):
            optimized_hyperparameters = multistart_hyperparameter_optimization(
                log_likelihood_optimizer,
                optimizer_parameters.num_multistarts,
                max_num_threads=max_num_threads,
                status=hyperopt_status,
            )

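        # Write the optimized hyperparameters back so that the serialized
        # covariance_info and the reported log likelihood reflect the optimum.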
        covariance_of_process.hyperparameters = optimized_hyperparameters

        log_likelihood_eval.current_point = optimized_hyperparameters

        return self.form_response({
                'endpoint': self._route_name,
                'covariance_info': covariance_of_process.get_json_serializable_info(),
                'status': {
                    'log_likelihood': log_likelihood_eval.compute_log_likelihood(),
                    'grad_log_likelihood': log_likelihood_eval.compute_grad_log_likelihood().tolist(),
                    'optimizer_success': hyperopt_status,
                    },
                })
Example #6
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        # TODO(GH-305): turn this into a unit test by going through OptimizableGpPrettyView
        # and mocking out dependencies (instead of awkwardly constructing a more complex object).
        test_case = self.gp_test_environments[0]
        num_to_sample = 1

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(self._build_json_payload(python_domain, python_cov, historical_data, num_to_sample))

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpNextPointsPrettyView(request)
        # get_params_from_request() requires this field to be set; the value is arbitrary for now.
        # TODO(GH-305): mock out this and other members
        view._route_name = GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)
        test_param_dict = TEST_GRADIENT_DESCENT_PARAMETERS._asdict()
        test_param_dict["num_multistarts"] = TEST_OPTIMIZER_MULTISTARTS
        assert optimizer_parameters._get_member_dict() == test_param_dict

        # Test arbitrary parameters get passed through
        for key in test_param_dict:
            test_param_dict[key] /= 2
        test_num_multistarts = test_param_dict.pop("num_multistarts")

        json_payload["optimizer_info"]["num_multistarts"] = test_num_multistarts
        json_payload["optimizer_info"]["optimizer_parameters"] = test_param_dict

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpNextPointsPrettyView(request)
        # get_params_from_request() requires this field to be set; the value is arbitrary for now.
        # TODO(GH-305): mock out this and other members
        view._route_name = GP_NEXT_POINTS_CONSTANT_LIAR_ROUTE_NAME
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        test_param_dict["num_multistarts"] = test_num_multistarts
        assert optimizer_parameters._get_member_dict() == test_param_dict
Example #7
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        test_case = self.gp_test_environments[0]

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(
            self._build_json_payload(python_domain, python_cov,
                                     historical_data))

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(
            params)

        test_param_dict = TEST_GRADIENT_DESCENT_PARAMETERS._asdict()
        test_param_dict['num_multistarts'] = TEST_OPTIMIZER_MULTISTARTS
        assert optimizer_parameters._get_member_dict() == test_param_dict

        # Test arbitrary parameters get passed through
        for key in test_param_dict:
            test_param_dict[key] /= 2
        test_num_multistarts = test_param_dict.pop('num_multistarts')

        json_payload['optimizer_info']['num_multistarts'] = test_num_multistarts
        json_payload['optimizer_info']['optimizer_parameters'] = test_param_dict

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(
            params)

        test_param_dict['num_multistarts'] = test_num_multistarts
        assert optimizer_parameters._get_member_dict() == test_param_dict
Example #8
    def test_optimizer_params_passed_through(self):
        """Test that the optimizer parameters get passed through to the endpoint."""
        test_case = self.gp_test_environments[0]

        python_domain, python_gp = test_case
        python_cov, historical_data = python_gp.get_core_data_copy()

        # Test default test parameters get passed through
        json_payload = json.loads(self._build_json_payload(python_domain, python_cov, historical_data))

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        test_param_dict = TEST_GRADIENT_DESCENT_PARAMETERS._asdict()
        test_param_dict['num_multistarts'] = TEST_OPTIMIZER_MULTISTARTS
        assert optimizer_parameters._get_member_dict() == test_param_dict

        # Test arbitrary parameters get passed through
        for key in test_param_dict:
            test_param_dict[key] /= 2
        test_num_multistarts = test_param_dict.pop('num_multistarts')

        json_payload['optimizer_info']['num_multistarts'] = test_num_multistarts
        json_payload['optimizer_info']['optimizer_parameters'] = test_param_dict

        request = pyramid.testing.DummyRequest(post=json_payload)
        request.json_body = json_payload
        view = GpHyperOptView(request)
        params = view.get_params_from_request()
        _, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

        test_param_dict['num_multistarts'] = test_num_multistarts
        assert optimizer_parameters._get_member_dict() == test_param_dict
    def compute_next_points_to_sample_response(self, params, optimizer_method_name, route_name, *args, **kwargs):
        """Compute the next points to sample (and their expected improvement) using optimizer_method_name from params in the request.

        .. Warning:: Attempting to find ``num_to_sample`` optimal points with
          ``num_sampled < num_to_sample`` historical points sampled can cause matrix issues under
          some conditions. Try requesting ``num_to_sample < num_sampled`` points for better
          performance. To bootstrap more points, try sampling at random or from a grid.

        :param params: the deserialized REST request, containing ei_optimizer_parameters and gp_historical_info
        :type params: a deserialized self.request_schema object, as a dict
        :param optimizer_method_name: the optimization method to use
        :type optimizer_method_name: string in :const:`moe.views.constant.NEXT_POINTS_OPTIMIZER_METHOD_NAMES`
        :param route_name: name of the route being called
        :type route_name: string in :const:`moe.views.constant.ALL_REST_ROUTES_ROUTE_NAME_TO_ENDPOINT`
        :param ``*args``: extra args to be passed to optimization method
        :param ``**kwargs``: extra kwargs to be passed to optimization method
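
        A hedged usage sketch from a subclass endpoint; the method name string
        is assumed to be one of
        :const:`moe.views.constant.NEXT_POINTS_OPTIMIZER_METHOD_NAMES`::

            return self.compute_next_points_to_sample_response(
                params,
                'multistart_expected_improvement_optimization',
                self._route_name,
            )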

        """
        points_being_sampled = numpy.array(params.get('points_being_sampled'))
        num_to_sample = params.get('num_to_sample')
        num_mc_iterations = params.get('mc_iterations')
        max_num_threads = params.get('max_num_threads')

        gaussian_process = _make_gp_from_params(params)

        ei_opt_status = {}
        # TODO(GH-89): Make the optimal_learning library handle this case 'organically' with
        # reasonable default behavior and remove hacks like this one.
        if gaussian_process.num_sampled == 0:
            # If there is no initial data we bootstrap with random points
            py_domain = _make_domain_from_params(params, python_version=True)
            next_points = py_domain.generate_uniform_random_points_in_domain(num_to_sample)
            ei_opt_status['found_update'] = True
            expected_improvement_evaluator = PythonExpectedImprovement(
                    gaussian_process,
                    points_being_sampled=points_being_sampled,
                    num_mc_iterations=num_mc_iterations,
                    )
        else:
            # Calculate the next best points to sample given the historical data

            optimizer_class, optimizer_parameters, num_random_samples = _make_optimizer_parameters_from_params(params)

            if optimizer_class == python_optimization.LBFGSBOptimizer:
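                # L-BFGS-B optimizes all num_to_sample points jointly, so the
                # search domain is the base domain repeated once per point.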
                domain = RepeatedDomain(num_to_sample, _make_domain_from_params(params, python_version=True))
                expected_improvement_evaluator = PythonExpectedImprovement(
                        gaussian_process,
                        points_being_sampled=points_being_sampled,
                        num_mc_iterations=num_mc_iterations,
                        mvndst_parameters=_make_mvndst_parameters_from_params(params)
                        )

                opt_method = getattr(moe.optimal_learning.python.python_version.expected_improvement, optimizer_method_name)
            else:
                domain = _make_domain_from_params(params, python_version=False)
                expected_improvement_evaluator = ExpectedImprovement(
                        gaussian_process,
                        points_being_sampled=points_being_sampled,
                        num_mc_iterations=num_mc_iterations,
                        )

                opt_method = getattr(moe.optimal_learning.python.cpp_wrappers.expected_improvement, optimizer_method_name)

            expected_improvement_optimizer = optimizer_class(
                    domain,
                    expected_improvement_evaluator,
                    optimizer_parameters,
                    num_random_samples=num_random_samples,
                    )

            with timing_context(EPI_OPTIMIZATION_TIMING_LABEL):
                next_points = opt_method(
                    expected_improvement_optimizer,
                    params.get('optimizer_info')['num_multistarts'],  # i.e., optimizer_parameters.num_multistarts
                    num_to_sample,
                    *args,
                    max_num_threads=max_num_threads,
                    status=ei_opt_status,
                    **kwargs
                )

        # TODO(GH-285): Use analytic q-EI here
        # TODO(GH-314): Need to resolve potential issue with NaNs before using q-EI here
        # It may be sufficient to check found_update == False in ei_opt_status
        # and then use q-EI, else set EI = 0.
        expected_improvement_evaluator.current_point = next_points
        # The C++ may fail to compute EI with some ``next_points`` inputs (e.g.,
        # ``points_to_sample`` and ``points_being_sampled`` are too close
        # together or too close to ``points_sampled``). We catch the exception when this happens
        # and attempt a more numerically robust option.
        try:
            expected_improvement = expected_improvement_evaluator.compute_expected_improvement()
        except Exception as exception:
            self.log.info('EI computation failed, probably because the GP variance matrix is singular. Error: {0!s}'.format(exception))

            # ``_compute_expected_improvement_monte_carlo`` in
            # :class:`moe.optimal_learning.python.python_version.expected_improvement.ExpectedImprovement`
            # has a more reliable (but very expensive) way to deal with singular variance matrices.
            python_ei_eval = PythonExpectedImprovement(
                expected_improvement_evaluator._gaussian_process,
                points_to_sample=next_points,
                points_being_sampled=points_being_sampled,
                num_mc_iterations=num_mc_iterations,
            )
            expected_improvement = python_ei_eval.compute_expected_improvement(force_monte_carlo=True)

        return self.form_response({
                'endpoint': route_name,
                'points_to_sample': next_points.tolist(),
                'status': {
                    'expected_improvement': expected_improvement,
                    'optimizer_success': ei_opt_status,
                    },
                })