Example #1
import numpy as np

# Assumed imports for this test snippet; the exact module paths may differ
# in the original test file.
from bayeso.bo import BO
from bayeso.utils import utils_bo


def test_optimize_normalize_Y():
    np.random.seed(42)
    arr_range = np.array([
        [0.0, 10.0],
        [-2.0, 2.0],
        [-5.0, 5.0],
    ])
    dim_X = arr_range.shape[0]
    num_X = 5
    X = np.random.randn(num_X, dim_X)
    Y = np.random.randn(num_X, 1)

    model_bo = BO(arr_range, normalize_Y=True)
    next_point, dict_info = model_bo.optimize(X, Y)
    Y_original = dict_info['Y_original']
    Y_normalized = dict_info['Y_normalized']

    assert np.all(Y == Y_original)
    assert np.all(Y != Y_normalized)
    assert np.all(utils_bo.normalize_min_max(Y) == Y_normalized)

    model_bo = BO(arr_range, normalize_Y=False)
    next_point, dict_info = model_bo.optimize(X, Y)
    Y_original = dict_info['Y_original']
    Y_normalized = dict_info['Y_normalized']

    assert np.all(Y == Y_normalized)
    assert np.all(Y == Y_original)
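What this test pins down is that dict_info always carries both views of the responses: 'Y_original' is the array that was passed in, and 'Y_normalized' is either a min-max-scaled copy (normalize_Y=True) or the same array (normalize_Y=False). A minimal sketch of relying on this after an optimize call, assuming model_bo, X, and Y are set up as in the test:

# Sketch only; model_bo, X, and Y are assumed to exist as above.
next_point, dict_info = model_bo.optimize(X, Y)
Y_used = dict_info['Y_normalized']   # what the surrogate was fit on
Y_raw = dict_info['Y_original']      # untouched responses for reporting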
Example #2
import numpy as np
import pytest

# Assumed imports for this test snippet; package_target aliases the module
# under test (the same utilities used via utils_bo in Example #1) and
# constants holds the package-wide constants, including MULTIPLIER_RESPONSE.
from bayeso import constants
from bayeso.utils import utils_bo as package_target


def test_normalize_min_max():
    Y = np.array([
        [1.0],
        [2.0],
        [10.0],
        [-5.0],
        [4.0],
        [2.0],
        [-4.0],
        [2.0],
    ])

    with pytest.raises(AssertionError):
        package_target.normalize_min_max(123)
    with pytest.raises(AssertionError):
        package_target.normalize_min_max('abc')
    with pytest.raises(AssertionError):
        package_target.normalize_min_max(np.squeeze(Y))

    Y_normalized = package_target.normalize_min_max(Y)
    truth_Y = np.array([
        [6.0 / 15.0],
        [7.0 / 15.0],
        [1.0],
        [0.0],
        [9.0 / 15.0],
        [7.0 / 15.0],
        [1.0 / 15.0],
        [7.0 / 15.0],
    ]) * constants.MULTIPLIER_RESPONSE

    assert np.all(Y_normalized == truth_Y)

    Y = np.array([
        [10.0],
        [10.0],
        [10.0],
    ])

    Y_normalized = package_target.normalize_min_max(Y)

    assert np.all(Y_normalized == Y)
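For reference, the transformation these assertions pin down is plain min-max scaling followed by a constant multiplier, with a constant response vector passed through unchanged. A minimal standalone sketch, assuming the multiplier is exposed as constants.MULTIPLIER_RESPONSE:

import numpy as np

def normalize_min_max_sketch(Y: np.ndarray, multiplier: float = 1.0) -> np.ndarray:
    # Scale responses to [0, multiplier]; a constant vector is returned
    # unchanged, matching the second half of the test above.
    min_Y = np.min(Y)
    max_Y = np.max(Y)
    if max_Y == min_Y:
        return Y
    return multiplier * (Y - min_Y) / (max_Y - min_Y)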
Example #3
import logging

import numpy as np

# Assumed imports for this snippet; the module paths follow the package
# layout implied by the calls below, and the logger is a plain stand-in
# for the package's own logger.
from bayeso import bo
from bayeso import constants
from bayeso.gp import gp
from bayeso.utils import utils_bo

logger = logging.getLogger(__name__)


def thompson_sampling_gp_iteration(
    range_X: np.ndarray,
    X: np.ndarray,
    Y: np.ndarray,
    normalize_Y: bool = constants.NORMALIZE_RESPONSE,
    str_sampling_method: str = 'sobol',
    num_samples: int = 200,
    debug: bool = False,
) -> np.ndarray:
    """
    It chooses the next query point via Thompson sampling.

    :param range_X: bounds for a search space. Shape: (d, 2).
    :type range_X: numpy.ndarray
    :param X: inputs. Shape: (n, d).
    :type X: numpy.ndarray
    :param Y: outputs. Shape: (n, 1).
    :type Y: numpy.ndarray
    :param normalize_Y: flag for normalizing responses.
    :type normalize_Y: bool., optional
    :param str_sampling_method: the name of sampling method.
    :type str_sampling_method: str., optional
    :param num_samples: the number of samples.
    :type num_samples: int., optional
    :param debug: flag for a debug option.
    :type debug: bool., optional

    :returns: the next point. Shape: (d, ).
    :rtype: numpy.ndarray

    :raises: AssertionError, ValueError

    """

    assert isinstance(range_X, np.ndarray)
    assert isinstance(X, np.ndarray)
    assert isinstance(Y, np.ndarray)
    assert isinstance(normalize_Y, bool)
    assert isinstance(str_sampling_method, str)
    assert isinstance(num_samples, int)
    assert isinstance(debug, bool)
    assert len(range_X.shape) == 2
    assert len(X.shape) == 2
    assert len(Y.shape) == 2
    assert X.shape[0] == Y.shape[0]
    assert range_X.shape[0] == X.shape[1]
    assert range_X.shape[1] == 2

    str_cov = 'matern52'
    prior_mu = None
    str_optimizer_method_gp = 'BFGS'

    if normalize_Y:
        if debug:
            logger.debug('Responses are normalized.')

        Y = utils_bo.normalize_min_max(Y)

    model_bo = bo.BOwGP(range_X)
    X_test = model_bo.get_samples(str_sampling_method, num_samples=num_samples)

    mu_Xs, _, Sigma_Xs = gp.predict_with_optimized_hyps(X, Y, X_test,
        str_cov=str_cov, str_optimizer_method=str_optimizer_method_gp,
        prior_mu=prior_mu, debug=debug)
    mu_Xs = np.squeeze(mu_Xs, axis=1)

    Y_sampled = None
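    # Sampling from the GP posterior can fail with a ValueError when Sigma_Xs
    # is not positive definite; retry with increasingly large diagonal jitter.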
    list_jitters = [0.0, 1e-4, 1e-2, 1e0, 1e1, 1e2, 1e3, 1e4]

    for jitter_cov in list_jitters:
        try:
            Sigma_Xs_ = Sigma_Xs + jitter_cov * np.eye(Sigma_Xs.shape[0])
            Y_sampled = gp.sample_functions(mu_Xs, Sigma_Xs_, num_samples=1)

            break
        except ValueError: # pragma: no cover
            pass

    if Y_sampled is None: # pragma: no cover
        raise ValueError('jitter_cov is not large enough.')

    ind_min = np.argmin(Y_sampled[:, 0])
    next_point = X_test[ind_min]

    return next_point
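A typical outer loop around this function evaluates the suggested point and grows the data set before the next call. A minimal sketch, where fun_target is a hypothetical objective introduced only for illustration:

import numpy as np

def fun_target(x: np.ndarray) -> float:
    # Hypothetical objective to be minimized; replace with the real target.
    return float(np.sum(x ** 2))

range_X = np.array([[-5.0, 5.0], [-5.0, 5.0]])
X = np.random.uniform(range_X[:, 0], range_X[:, 1], size=(3, 2))
Y = np.array([[fun_target(x)] for x in X])

for _ in range(10):
    next_point = thompson_sampling_gp_iteration(range_X, X, Y)
    X = np.vstack([X, next_point[np.newaxis, :]])
    Y = np.vstack([Y, [[fun_target(next_point)]]])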
Example #4
    def optimize(
        self,
        X_train: np.ndarray,
        Y_train: np.ndarray,
        str_sampling_method: str = constants.STR_SAMPLING_METHOD_AO,
        num_samples: int = constants.NUM_SAMPLES_AO,
        str_mlm_method: str = constants.STR_MLM_METHOD,
    ) -> constants.TYPING_TUPLE_ARRAY_DICT:
        """
        It computes the acquired example, the candidates for acquisition,
        the acquisition function values for the candidates, the covariance
        matrix, the inverse of the covariance matrix, the optimized
        hyperparameters, and the execution times.

        :param X_train: inputs. Shape: (n, d).
        :type X_train: numpy.ndarray
        :param Y_train: outputs. Shape: (n, 1).
        :type Y_train: numpy.ndarray
        :param str_sampling_method: the name of sampling method for
            acquisition function optimization.
        :type str_sampling_method: str., optional
        :param num_samples: the number of samples.
        :type num_samples: int., optional
        :param str_mlm_method: the name of marginal likelihood maximization
            method for Gaussian process regression.
        :type str_mlm_method: str., optional

        :returns: acquired example and dictionary of information. Shape: ((d, ), dict.).
        :rtype: (numpy.ndarray, dict.)

        :raises: AssertionError

        """

        assert isinstance(X_train, np.ndarray)
        assert isinstance(Y_train, np.ndarray)
        assert isinstance(str_sampling_method, str)
        assert isinstance(num_samples, int)
        assert isinstance(str_mlm_method, str)
        assert len(X_train.shape) == 2
        assert len(Y_train.shape) == 2
        assert Y_train.shape[1] == 1
        assert X_train.shape[0] == Y_train.shape[0]
        assert X_train.shape[1] == self.num_dim
        assert num_samples > 0
        assert str_sampling_method in constants.ALLOWED_SAMPLING_METHOD
        assert str_mlm_method in constants.ALLOWED_MLM_METHOD

        time_start = time.time()
        Y_train_orig = Y_train

        if self.normalize_Y and str_mlm_method != 'converged':
            if self.debug:
                self.logger.debug('Responses are normalized.')

            Y_train = utils_bo.normalize_min_max(Y_train)

        time_start_surrogate = time.time()

        if str_mlm_method == 'regular':
            cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
                X_train,
                Y_train,
                self.prior_mu,
                self.str_cov,
                str_optimizer_method=self.str_optimizer_method_gp,
                str_modelselection_method=self.str_modelselection_method,
                use_ard=self.use_ard,
                debug=self.debug)
        elif str_mlm_method == 'combined':
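            # Fit with each optimizer below and keep whichever hyperparameters
            # attain the lowest negative log marginal likelihood.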
            from bayeso.gp import gp_likelihood
            from bayeso.utils import utils_gp
            from bayeso.utils import utils_covariance

            prior_mu_train = utils_gp.get_prior_mu(self.prior_mu, X_train)

            neg_log_ml_best = np.inf
            cov_X_X_best = None
            inv_cov_X_X_best = None
            hyps_best = None

            for cur_str_optimizer_method in ['BFGS', 'Nelder-Mead']:
                cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
                    X_train,
                    Y_train,
                    self.prior_mu,
                    self.str_cov,
                    str_optimizer_method=cur_str_optimizer_method,
                    str_modelselection_method=self.str_modelselection_method,
                    use_ard=self.use_ard,
                    debug=self.debug)
                cur_neg_log_ml_ = gp_likelihood.neg_log_ml(
                    X_train,
                    Y_train,
                    utils_covariance.convert_hyps(
                        self.str_cov, hyps, fix_noise=constants.FIX_GP_NOISE),
                    self.str_cov,
                    prior_mu_train,
                    use_ard=self.use_ard,
                    fix_noise=constants.FIX_GP_NOISE,
                    use_gradient=False,
                    debug=self.debug)

                if cur_neg_log_ml_ < neg_log_ml_best:
                    neg_log_ml_best = cur_neg_log_ml_
                    cov_X_X_best = cov_X_X
                    inv_cov_X_X_best = inv_cov_X_X
                    hyps_best = hyps

            cov_X_X = cov_X_X_best
            inv_cov_X_X = inv_cov_X_X_best
            hyps = hyps_best
        elif str_mlm_method == 'converged':
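            # Re-optimize the hyperparameters only until they stop changing
            # across iterations; afterwards, reuse the last optimized values.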
            fix_noise = constants.FIX_GP_NOISE

            if self.is_optimize_hyps:
                cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
                    X_train,
                    Y_train,
                    self.prior_mu,
                    self.str_cov,
                    str_optimizer_method=self.str_optimizer_method_gp,
                    str_modelselection_method=self.str_modelselection_method,
                    use_ard=self.use_ard,
                    debug=self.debug)

                self.is_optimize_hyps = not utils_bo.check_hyps_convergence(
                    self.historical_hyps, hyps, self.str_cov, fix_noise)
            else:  # pragma: no cover
                if self.debug:
                    self.logger.debug('hyps converged.')
                hyps = self.historical_hyps[-1]
                cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(
                    X_train,
                    hyps,
                    self.str_cov,
                    fix_noise=fix_noise,
                    debug=self.debug)
        else:  # pragma: no cover
            raise ValueError('optimize: missing condition for str_mlm_method.')

        self.historical_hyps.append(hyps)

        time_end_surrogate = time.time()

        time_start_acq = time.time()
        fun_negative_acquisition = lambda X_test: -1.0 * self.compute_acquisitions(
            X_test, X_train, Y_train, cov_X_X, inv_cov_X_X, hyps)
        next_point, next_points = self._optimize(
            fun_negative_acquisition,
            str_sampling_method=str_sampling_method,
            num_samples=num_samples)
        time_end_acq = time.time()

        acquisitions = fun_negative_acquisition(next_points)
        time_end = time.time()

        dict_info = {
            'next_points': next_points,
            'acquisitions': acquisitions,
            'Y_original': Y_train_orig,
            'Y_normalized': Y_train,
            'cov_X_X': cov_X_X,
            'inv_cov_X_X': inv_cov_X_X,
            'hyps': hyps,
            'time_surrogate': time_end_surrogate - time_start_surrogate,
            'time_acq': time_end_acq - time_start_acq,
            'time_overall': time_end - time_start,
        }

        if self.debug:
            self.logger.debug('overall time consumed to acquire: %.4f sec.',
                              time_end - time_start)

        return next_point, dict_info
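The timing entries make it easy to see where an iteration spends its time. A brief sketch of consuming the returned dictionary, assuming model_bo is a constructed instance of this class and X_train and Y_train have shapes (n, d) and (n, 1):

# Sketch only; model_bo, X_train, and Y_train are assumed to exist.
next_point, dict_info = model_bo.optimize(X_train, Y_train)
print('surrogate: %.4f sec.' % dict_info['time_surrogate'])
print('acquisition: %.4f sec.' % dict_info['time_acq'])
print('overall: %.4f sec.' % dict_info['time_overall'])
print('optimized hyperparameters:', dict_info['hyps'])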
Example #5
    def optimize(
        self,
        X_train: np.ndarray,
        Y_train: np.ndarray,
        str_sampling_method: str = constants.STR_SAMPLING_METHOD_AO_TREES,
        num_samples: int = constants.NUM_SAMPLES_AO_TREES,
    ) -> constants.TYPING_TUPLE_ARRAY_DICT:
        """
        It computes the acquired example, the candidates for acquisition,
        the acquisition function values for the candidates, the trained
        trees, and the execution times.

        :param X_train: inputs. Shape: (n, d).
        :type X_train: numpy.ndarray
        :param Y_train: outputs. Shape: (n, 1).
        :type Y_train: numpy.ndarray
        :param str_sampling_method: the name of sampling method for
            acquisition function optimization.
        :type str_sampling_method: str., optional
        :param num_samples: the number of samples.
        :type num_samples: int., optional

        :returns: acquired example and dictionary of information. Shape: ((d, ), dict.).
        :rtype: (numpy.ndarray, dict.)

        :raises: AssertionError

        """

        assert isinstance(X_train, np.ndarray)
        assert isinstance(Y_train, np.ndarray)
        assert isinstance(str_sampling_method, str)
        assert isinstance(num_samples, int)
        assert len(X_train.shape) == 2
        assert len(Y_train.shape) == 2
        assert Y_train.shape[1] == 1
        assert X_train.shape[0] == Y_train.shape[0]
        assert X_train.shape[1] == self.num_dim
        assert num_samples > 0
        assert str_sampling_method in constants.ALLOWED_SAMPLING_METHOD

        time_start = time.time()
        Y_train_orig = Y_train

        if self.normalize_Y:
            if self.debug:
                self.logger.debug('Responses are normalized.')

            Y_train = utils_bo.normalize_min_max(Y_train)

        time_start_surrogate = time.time()
        trees = self.get_trees(X_train, Y_train)
        time_end_surrogate = time.time()

        time_start_acq = time.time()
        next_points = self.get_samples(str_sampling_method,
                                       fun_objective=None,
                                       num_samples=num_samples)

        fun_negative_acquisition = lambda X_test: -1.0 * self.compute_acquisitions(
            X_test, X_train, Y_train, trees)
        acquisitions = fun_negative_acquisition(next_points)
        ind_next_point = np.argmin(acquisitions)
        next_point = next_points[ind_next_point]

        time_end_acq = time.time()

        time_end = time.time()

        dict_info = {
            'next_points': next_points,
            'acquisitions': acquisitions,
            'Y_original': Y_train_orig,
            'Y_normalized': Y_train,
            'trees': trees,
            'time_surrogate': time_end_surrogate - time_start_surrogate,
            'time_acq': time_end_acq - time_start_acq,
            'time_overall': time_end - time_start,
        }

        if self.debug:
            self.logger.debug('overall time consumed to acquire: %.4f sec.',
                              time_end - time_start)

        return next_point, dict_info
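Compared with Example #4, the surrogate here is an ensemble of trees, so dict_info exposes 'trees' in place of the covariance-related entries. A brief sketch, assuming model_trees is a constructed instance of this tree-based class:

# Sketch only; model_trees, X_train, and Y_train are assumed to exist.
next_point, dict_info = model_trees.optimize(X_train, Y_train)
trees = dict_info['trees']  # the fitted surrogate ensemble
print('acquired point:', next_point)
print('overall: %.4f sec.' % dict_info['time_overall'])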