def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray,
    cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict,
    str_cov: str=constants.STR_COV,
    prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None,
    debug: bool=False
) -> constants.TYPING_TUPLE_THREE_ARRAYS:
    """
    This function returns posterior mean and posterior standard deviation
    estimates over `X_test`, computed by Gaussian process regression with
    `X_train`, `Y_train`, `cov_X_X`, `inv_cov_X_X`, and `hyps`.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param X_test: inputs. Shape: (l, d) or (l, m, d).
    :type X_test: numpy.ndarray
    :param cov_X_X: kernel matrix over `X_train`. Shape: (n, n).
    :type cov_X_X: numpy.ndarray
    :param inv_cov_X_X: kernel matrix inverse over `X_train`. Shape: (n, n).
    :type inv_cov_X_X: numpy.ndarray
    :param hyps: dictionary of hyperparameters for Gaussian process.
    :type hyps: dict.
    :param str_cov: the name of covariance function.
    :type str_cov: str., optional
    :param prior_mu: None, or prior mean function.
    :type prior_mu: NoneType, or callable, optional
    :param debug: flag for printing log messages.
    :type debug: bool., optional

    :returns: a tuple of posterior mean function over `X_test`, posterior
        standard deviation function over `X_test`, and posterior covariance
        matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)).
    :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray)

    :raises: AssertionError

    """

    utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test)
    assert isinstance(cov_X_X, np.ndarray)
    assert isinstance(inv_cov_X_X, np.ndarray)
    assert isinstance(hyps, dict)
    assert len(cov_X_X.shape) == 2
    assert len(inv_cov_X_X.shape) == 2
    assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all()
    utils_covariance.check_str_cov('predict_with_cov', str_cov,
        X_train.shape, shape_X2=X_test.shape)

    prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train)
    prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test)
    cov_X_Xs = covariance.cov_main(str_cov, X_train, X_test, hyps, False)
    cov_Xs_Xs = covariance.cov_main(str_cov, X_test, X_test, hyps, True)
    # Symmetrize to remove numerical asymmetry before taking the diagonal.
    cov_Xs_Xs = (cov_Xs_Xs + cov_Xs_Xs.T) / 2.0

    mu_Xs = np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), Y_train - prior_mu_train) + prior_mu_test
    Sigma_Xs = cov_Xs_Xs - np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), cov_X_Xs)
    return mu_Xs, np.expand_dims(np.sqrt(np.maximum(np.diag(Sigma_Xs), 0.0)), axis=1), Sigma_Xs
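
# --- Usage sketch (an illustrative addition, not part of bayeso) ---
# A minimal example of calling predict_with_cov, assuming the module-level
# names used above (np, constants, covariance, utils_covariance). Default
# hyperparameters come from utils_covariance.get_hyps, so no hyperparameter
# values are invented here; the data are random placeholders.
def _example_predict_with_cov():
    state = np.random.RandomState(42)
    X_train = state.randn(10, 2)
    Y_train = state.randn(10, 1)
    X_test = state.randn(5, 2)

    # Default hyperparameter dictionary for the default covariance function.
    hyps = utils_covariance.get_hyps(constants.STR_COV, X_train.shape[1])
    # Precompute the kernel matrix and its inverse once; predict_with_cov
    # can then be called repeatedly with different X_test.
    cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(
        X_train, hyps, constants.STR_COV, fix_noise=constants.FIX_GP_NOISE)

    mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(
        X_train, Y_train, X_test, cov_X_X, inv_cov_X_X, hyps,
        str_cov=constants.STR_COV)
    # Shapes: mu_Xs (5, 1), sigma_Xs (5, 1), Sigma_Xs (5, 5).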
def optimize(self, X_train: np.ndarray, Y_train: np.ndarray,
    str_sampling_method: str=constants.STR_SAMPLING_METHOD_AO,
    num_samples: int=constants.NUM_SAMPLES_AO,
    str_mlm_method: str=constants.STR_MLM_METHOD,
) -> constants.TYPING_TUPLE_ARRAY_DICT:
    """
    It computes the acquired example, candidates of acquired examples,
    acquisition function values for the candidates, the covariance matrix,
    the inverse of the covariance matrix, optimized hyperparameters,
    and execution times.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param str_sampling_method: the name of sampling method for
        acquisition function optimization.
    :type str_sampling_method: str., optional
    :param num_samples: the number of samples.
    :type num_samples: int., optional
    :param str_mlm_method: the name of marginal likelihood maximization
        method for Gaussian process regression.
    :type str_mlm_method: str., optional

    :returns: acquired example and dictionary of information. Shape: ((d, ), dict.).
    :rtype: (numpy.ndarray, dict.)

    :raises: AssertionError

    """

    assert isinstance(X_train, np.ndarray)
    assert isinstance(Y_train, np.ndarray)
    assert isinstance(str_sampling_method, str)
    assert isinstance(num_samples, int)
    assert isinstance(str_mlm_method, str)
    assert len(X_train.shape) == 2
    assert len(Y_train.shape) == 2
    assert Y_train.shape[1] == 1
    assert X_train.shape[0] == Y_train.shape[0]
    assert X_train.shape[1] == self.num_dim
    assert num_samples > 0
    assert str_sampling_method in constants.ALLOWED_SAMPLING_METHOD
    assert str_mlm_method in constants.ALLOWED_MLM_METHOD

    time_start = time.time()
    Y_train_orig = Y_train

    if self.normalize_Y and str_mlm_method != 'converged':
        if self.debug:
            self.logger.debug('Responses are normalized.')
        Y_train = utils_bo.normalize_min_max(Y_train)

    time_start_surrogate = time.time()

    if str_mlm_method == 'regular':
        cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
            X_train, Y_train, self.prior_mu, self.str_cov,
            str_optimizer_method=self.str_optimizer_method_gp,
            str_modelselection_method=self.str_modelselection_method,
            use_ard=self.use_ard, debug=self.debug)
    elif str_mlm_method == 'combined':
        from bayeso.gp import gp_likelihood
        from bayeso.utils import utils_gp
        from bayeso.utils import utils_covariance

        prior_mu_train = utils_gp.get_prior_mu(self.prior_mu, X_train)

        # Fit with each optimizer and keep the model achieving the lowest
        # negative log marginal likelihood.
        neg_log_ml_best = np.inf
        cov_X_X_best = None
        inv_cov_X_X_best = None
        hyps_best = None

        for cur_str_optimizer_method in ['BFGS', 'Nelder-Mead']:
            cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
                X_train, Y_train, self.prior_mu, self.str_cov,
                str_optimizer_method=cur_str_optimizer_method,
                str_modelselection_method=self.str_modelselection_method,
                use_ard=self.use_ard, debug=self.debug)
            cur_neg_log_ml_ = gp_likelihood.neg_log_ml(
                X_train, Y_train,
                utils_covariance.convert_hyps(
                    self.str_cov, hyps, fix_noise=constants.FIX_GP_NOISE),
                self.str_cov, prior_mu_train,
                use_ard=self.use_ard, fix_noise=constants.FIX_GP_NOISE,
                use_gradient=False, debug=self.debug)

            if cur_neg_log_ml_ < neg_log_ml_best:
                neg_log_ml_best = cur_neg_log_ml_
                cov_X_X_best = cov_X_X
                inv_cov_X_X_best = inv_cov_X_X
                hyps_best = hyps

        cov_X_X = cov_X_X_best
        inv_cov_X_X = inv_cov_X_X_best
        hyps = hyps_best
    elif str_mlm_method == 'converged':
        fix_noise = constants.FIX_GP_NOISE

        if self.is_optimize_hyps:
            cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
                X_train, Y_train, self.prior_mu, self.str_cov,
                str_optimizer_method=self.str_optimizer_method_gp,
                str_modelselection_method=self.str_modelselection_method,
                use_ard=self.use_ard, debug=self.debug)
            self.is_optimize_hyps = not utils_bo.check_hyps_convergence(
                self.historical_hyps, hyps, self.str_cov, fix_noise)
        else: # pragma: no cover
            if self.debug:
                self.logger.debug('hyps converged.')
            hyps = self.historical_hyps[-1]
            cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(
                X_train, hyps, self.str_cov, fix_noise=fix_noise, debug=self.debug)
    else: # pragma: no cover
        raise ValueError('optimize: missing condition for str_mlm_method.')

    self.historical_hyps.append(hyps)
    time_end_surrogate = time.time()

    time_start_acq = time.time()
    fun_negative_acquisition = lambda X_test: -1.0 * self.compute_acquisitions(
        X_test, X_train, Y_train, cov_X_X, inv_cov_X_X, hyps)
    next_point, next_points = self._optimize(fun_negative_acquisition,
        str_sampling_method=str_sampling_method, num_samples=num_samples)
    time_end_acq = time.time()

    acquisitions = fun_negative_acquisition(next_points)
    time_end = time.time()

    dict_info = {
        'next_points': next_points,
        'acquisitions': acquisitions,
        'Y_original': Y_train_orig,
        'Y_normalized': Y_train,
        'cov_X_X': cov_X_X,
        'inv_cov_X_X': inv_cov_X_X,
        'hyps': hyps,
        'time_surrogate': time_end_surrogate - time_start_surrogate,
        'time_acq': time_end_acq - time_start_acq,
        'time_overall': time_end - time_start,
    }

    if self.debug:
        self.logger.debug('overall time consumed to acquire: %.4f sec.',
            time_end - time_start)

    return next_point, dict_info
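
# --- Usage sketch (an illustrative addition, not part of bayeso) ---
# A hypothetical driver loop showing how optimize() is typically reached.
# The class name bo.BO and its constructor argument (a (d, 2) array of
# per-dimension search bounds) are assumptions about the surrounding
# package; only the optimize() call itself is taken from the method above.
def _example_optimize_loop():
    import numpy as np
    from bayeso import bo  # assumed entry point

    range_X = np.array([[-5.0, 5.0], [-5.0, 5.0]])  # assumed bounds format
    model_bo = bo.BO(range_X)

    X = np.random.uniform(-5.0, 5.0, size=(3, 2))
    Y = np.sum(X ** 2, axis=1, keepdims=True)  # toy quadratic objective

    for _ in range(10):
        # next_point: (d, ); dict_info carries candidates, acquisition
        # values, kernel matrices, hyperparameters, and timings.
        next_point, dict_info = model_bo.optimize(X, Y)
        X = np.vstack([X, next_point])
        Y = np.vstack([Y, [[float(np.sum(next_point ** 2))]]])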
def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray,
    prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str,
    str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP,
    str_modelselection_method: str=constants.STR_MODELSELECTION_METHOD,
    use_ard: bool=constants.USE_ARD,
    fix_noise: bool=constants.FIX_GP_NOISE,
    debug: bool=False
) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT:
    """
    This function computes the kernel matrix optimized by the optimization
    method specified, its inverse matrix, and the optimized hyperparameters.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param prior_mu: prior mean function or None.
    :type prior_mu: callable or NoneType
    :param str_cov: the name of covariance function.
    :type str_cov: str.
    :param str_optimizer_method: the name of optimization method.
    :type str_optimizer_method: str., optional
    :param str_modelselection_method: the name of model selection method.
    :type str_modelselection_method: str., optional
    :param use_ard: flag for using automatic relevance determination.
    :type use_ard: bool., optional
    :param fix_noise: flag for fixing a noise.
    :type fix_noise: bool., optional
    :param debug: flag for printing log messages.
    :type debug: bool., optional

    :returns: a tuple of kernel matrix over `X_train`, kernel matrix
        inverse, and dictionary of hyperparameters.
    :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.)

    :raises: AssertionError, ValueError

    """

    # TODO: check to input same fix_noise to convert_hyps and restore_hyps
    utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug)
    assert isinstance(str_optimizer_method, str)
    assert isinstance(str_modelselection_method, str)
    assert isinstance(use_ard, bool)
    assert isinstance(fix_noise, bool)
    utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape)
    assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP
    assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD

    use_gradient = bool(str_optimizer_method != 'Nelder-Mead')
    # TODO: Now, use_gradient is fixed as False.
    # use_gradient = False

    time_start = time.time()

    if debug:
        logger.debug('str_optimizer_method: %s', str_optimizer_method)
        logger.debug('str_modelselection_method: %s', str_modelselection_method)
        logger.debug('use_gradient: %s', use_gradient)

    prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train)
    if str_cov in constants.ALLOWED_COV_BASE:
        num_dim = X_train.shape[1]
    elif str_cov in constants.ALLOWED_COV_SET:
        num_dim = X_train.shape[2]
        use_gradient = False

    if str_modelselection_method == 'ml':
        neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_ml(
            X_train, Y_train, hyps, str_cov, prior_mu_train,
            use_ard=use_ard, fix_noise=fix_noise,
            use_gradient=use_gradient, debug=debug)
    elif str_modelselection_method == 'loocv':
        # TODO: add use_ard.
        neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_pseudo_l_loocv(
            X_train, Y_train, hyps, str_cov, prior_mu_train,
            fix_noise=fix_noise, debug=debug)
        use_gradient = False
    else: # pragma: no cover
        raise ValueError('get_optimized_kernel: missing conditions for str_modelselection_method.')

    hyps_converted = utils_covariance.convert_hyps(
        str_cov,
        utils_covariance.get_hyps(str_cov, num_dim, use_ard=use_ard),
        fix_noise=fix_noise
    )

    if str_optimizer_method in ['BFGS', 'SLSQP']:
        result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted,
            method=str_optimizer_method, jac=use_gradient, options={'disp': False})

        if debug:
            logger.debug('negative log marginal likelihood: %.6f', result_optimized.fun)
            logger.debug('scipy message: %s', result_optimized.message)

        result_optimized = result_optimized.x
    elif str_optimizer_method in ['L-BFGS-B', 'SLSQP-Bounded']:
        if str_optimizer_method == 'SLSQP-Bounded':
            str_optimizer_method = 'SLSQP'

        bounds = utils_covariance.get_range_hyps(str_cov, num_dim,
            use_ard=use_ard, fix_noise=fix_noise)
        result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted,
            method=str_optimizer_method, bounds=bounds, jac=use_gradient,
            options={'disp': False})

        if debug:
            logger.debug('negative log marginal likelihood: %.6f', result_optimized.fun)
            logger.debug('scipy message: %s', result_optimized.message)

        result_optimized = result_optimized.x
    elif str_optimizer_method in ['Nelder-Mead']:
        result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted,
            method=str_optimizer_method, options={'disp': False})

        if debug:
            logger.debug('negative log marginal likelihood: %.6f', result_optimized.fun)
            logger.debug('scipy message: %s', result_optimized.message)

        result_optimized = result_optimized.x
    else: # pragma: no cover
        raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method.')

    hyps = utils_covariance.restore_hyps(str_cov, result_optimized,
        use_ard=use_ard, fix_noise=fix_noise)

    hyps = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim)
    cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(
        X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug)

    time_end = time.time()

    if debug:
        logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps))
        logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start)

    return cov_X_X, inv_cov_X_X, hyps
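
# --- Usage sketch (an illustrative addition, not part of bayeso) ---
# Fitting kernel hyperparameters on random placeholder data, using only
# names defined or used in this module. 'BFGS' and 'ml' are branches
# implemented above; prior_mu=None selects the zero prior mean.
def _example_get_optimized_kernel():
    state = np.random.RandomState(42)
    X_train = state.randn(10, 2)
    Y_train = state.randn(10, 1)

    cov_X_X, inv_cov_X_X, hyps = get_optimized_kernel(
        X_train, Y_train, None, constants.STR_COV,
        str_optimizer_method='BFGS', str_modelselection_method='ml')
    # cov_X_X and inv_cov_X_X are (10, 10); hyps is the optimized dictionary.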
def test_get_prior_mu():
    fun_prior = lambda X: np.expand_dims(np.linalg.norm(X, axis=1), axis=1)
    fun_prior_1d = lambda X: np.linalg.norm(X, axis=1)
    X = np.reshape(np.arange(0, 90), (30, 3))

    with pytest.raises(AssertionError) as error:
        package_target.get_prior_mu(1, X)
    with pytest.raises(AssertionError) as error:
        package_target.get_prior_mu(fun_prior, 1)
    with pytest.raises(AssertionError) as error:
        package_target.get_prior_mu(fun_prior, np.arange(0, 100))
    with pytest.raises(AssertionError) as error:
        package_target.get_prior_mu(None, np.arange(0, 100))
    with pytest.raises(AssertionError) as error:
        package_target.get_prior_mu(fun_prior_1d, X)

    assert (package_target.get_prior_mu(None, X) == np.zeros((X.shape[0], 1))).all()
    assert (package_target.get_prior_mu(fun_prior, X) == fun_prior(X)).all()
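
# --- Contract sketch (an illustrative addition, not part of the test suite) ---
# The test above pins down get_prior_mu's contract: prior_mu must be None or
# a callable mapping an (n, d) array to an (n, 1) array; a callable returning
# shape (n, ), like fun_prior_1d, is rejected. A valid constant prior:
def _example_valid_prior_mu():
    constant_prior = lambda X: np.full((X.shape[0], 1), 2.5)  # shape (n, 1)
    X = np.ones((4, 3))
    assert package_target.get_prior_mu(constant_prior, X).shape == (4, 1)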
def get_optimized_kernel(X_train, Y_train, prior_mu, str_cov,
    str_optimizer_method=constants.STR_OPTIMIZER_METHOD_GP,
    str_modelselection_method=constants.STR_MODELSELECTION_METHOD,
    is_fixed_noise=constants.IS_FIXED_GP_NOISE,
    debug=False
):
    """
    This function computes the kernel matrix optimized by the optimization
    method specified, its inverse matrix, and the optimized hyperparameters.

    :param X_train: inputs. Shape: (n, d) or (n, m, d).
    :type X_train: numpy.ndarray
    :param Y_train: outputs. Shape: (n, 1).
    :type Y_train: numpy.ndarray
    :param prior_mu: prior mean function or None.
    :type prior_mu: function or NoneType
    :param str_cov: the name of covariance function.
    :type str_cov: str.
    :param str_optimizer_method: the name of optimization method.
    :type str_optimizer_method: str., optional
    :param str_modelselection_method: the name of model selection method.
    :type str_modelselection_method: str., optional
    :param is_fixed_noise: flag for fixing a noise.
    :type is_fixed_noise: bool., optional
    :param debug: flag for printing log messages.
    :type debug: bool., optional

    :returns: a tuple of kernel matrix over `X_train`, kernel matrix
        inverse, and dictionary of hyperparameters.
    :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.)

    :raises: AssertionError, ValueError

    """

    # TODO: check to input same is_fixed_noise to convert_hyps and restore_hyps
    assert isinstance(X_train, np.ndarray)
    assert isinstance(Y_train, np.ndarray)
    assert callable(prior_mu) or prior_mu is None
    assert isinstance(str_cov, str)
    assert isinstance(str_optimizer_method, str)
    assert isinstance(str_modelselection_method, str)
    assert isinstance(is_fixed_noise, bool)
    assert isinstance(debug, bool)
    assert len(Y_train.shape) == 2
    assert X_train.shape[0] == Y_train.shape[0]
    utils_gp.check_str_cov('get_optimized_kernel', str_cov, X_train.shape)
    assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP
    assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD

    # TODO: fix this.
    is_gradient = str_optimizer_method != 'Nelder-Mead'

    time_start = time.time()

    if debug:
        logger.debug('str_optimizer_method: {}'.format(str_optimizer_method))
        logger.debug('str_modelselection_method: {}'.format(str_modelselection_method))

    prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train)
    if str_cov in constants.ALLOWED_GP_COV_BASE:
        num_dim = X_train.shape[1]
    elif str_cov in constants.ALLOWED_GP_COV_SET:
        num_dim = X_train.shape[2]
        is_gradient = False

    if str_modelselection_method == 'ml':
        neg_log_ml_ = lambda hyps: neg_log_ml(X_train, Y_train, hyps, str_cov,
            prior_mu_train, is_fixed_noise=is_fixed_noise,
            is_gradient=is_gradient, debug=debug)
    elif str_modelselection_method == 'loocv':
        neg_log_ml_ = lambda hyps: neg_log_pseudo_l_loocv(X_train, Y_train,
            hyps, str_cov, prior_mu_train, is_fixed_noise=is_fixed_noise,
            debug=debug)
        is_gradient = False
    else: # pragma: no cover
        raise ValueError('get_optimized_kernel: missing conditions for str_modelselection_method.')

    hyps_converted = utils_covariance.convert_hyps(
        str_cov,
        utils_covariance.get_hyps(str_cov, num_dim),
        is_fixed_noise=is_fixed_noise,
    )

    if str_optimizer_method == 'BFGS':
        result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted,
            method=str_optimizer_method, jac=is_gradient, options={'disp': False})

        if debug:
            logger.debug('scipy message: {}'.format(result_optimized.message))

        result_optimized = result_optimized.x
    elif str_optimizer_method == 'L-BFGS-B':
        bounds = utils_covariance.get_range_hyps(str_cov, num_dim,
            is_fixed_noise=is_fixed_noise)
        result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted,
            method=str_optimizer_method, bounds=bounds, jac=is_gradient,
            options={'disp': False})

        if debug:
            logger.debug('scipy message: {}'.format(result_optimized.message))

        result_optimized = result_optimized.x
    elif str_optimizer_method == 'Nelder-Mead':
        result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted,
            method=str_optimizer_method, options={'disp': False})

        if debug:
            logger.debug('scipy message: {}'.format(result_optimized.message))

        result_optimized = result_optimized.x
    # TODO: fill in this condition.
    elif str_optimizer_method == 'DIRECT': # pragma: no cover
        raise NotImplementedError('get_optimized_kernel: allowed str_optimizer_method, but it is not implemented.')
    else: # pragma: no cover
        raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method.')

    hyps = utils_covariance.restore_hyps(str_cov, result_optimized,
        is_fixed_noise=is_fixed_noise)

    hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim)
    cov_X_X, inv_cov_X_X, grad_cov_X_X = gp_common.get_kernel_inverse(
        X_train, hyps, str_cov, is_fixed_noise=is_fixed_noise, debug=debug)

    time_end = time.time()

    if debug:
        logger.debug('hyps optimized: {}'.format(utils_logger.get_str_hyps(hyps)))
        logger.debug('time consumed to construct gpr: {:.4f} sec.'.format(time_end - time_start))

    return cov_X_X, inv_cov_X_X, hyps
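
# --- Round-trip sketch for the TODO above (an illustrative addition) ---
# convert_hyps flattens the hyperparameter dictionary into the vector that
# scipy.optimize.minimize sees, and restore_hyps inverts it. Both calls must
# receive the same is_fixed_noise flag; otherwise the noise entry is present
# on one side and missing on the other. The covariance name 'se' is assumed
# to be an allowed value here.
def _example_hyps_round_trip():
    hyps_dict = utils_covariance.get_hyps('se', 2)
    vec = utils_covariance.convert_hyps('se', hyps_dict, is_fixed_noise=True)
    hyps_back = utils_covariance.restore_hyps('se', vec, is_fixed_noise=True)
    # hyps_back should match hyps_dict up to the fixed noise entry.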