import numpy as np
from DIRECT import solve
# paramify_no_types and the input_space object are assumed to be provided by
# the surrounding module.

def find_reference_point_using_direct(tasks, module, input_space, grid_size = 20000):

	def create_fun_neg(task):
		def fun(params, gradient = False):

			if len(params.shape) > 1 and params.shape[ 1 ] > 1:
				params = params.flatten()

			params = input_space.from_unit(np.array([ params ])).flatten()

			return -1.0 * module.main(0, paramify_no_types(input_space.paramify(params)))[ task ]

		return fun

	funs_neg = [ create_fun_neg(task) for task in tasks ]

	reference_point = np.zeros(len(funs_neg))

	for i in range(len(funs_neg)):

		def f(x, user_data):
			if x.ndim == 1:
				x = x[None, :]

			value = funs_neg[ i ](x)

			return value, 0

		l = np.zeros(input_space.num_dims)
		u = np.ones(input_space.num_dims)
		
		x, y_opt, ierror = solve(f, l, u, maxf = 85000)

		reference_point[ i ] = -1.0 * y_opt + np.abs(-1.0 * y_opt * 0.01)

	return reference_point
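
All of the snippets on this page rely on the same pydirect calling convention: the objective receives the candidate point plus a user_data payload and returns a (value, flag) pair, and solve takes the objective together with lower and upper bound arrays and returns (x, fmin, ierror). A minimal self-contained sketch of that contract, assuming pydirect is installed; the quadratic objective is illustrative only:

from DIRECT import solve
import numpy as np

def sphere(x, user_data):
    # DIRECT minimizes; the second return value is a status flag (0 = OK).
    return np.dot(x, x), 0

l = np.array([-5.0, -5.0])
u = np.array([5.0, 5.0])
x, fmin, ierror = solve(sphere, l, u, maxf=2000)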
Example #2
    def optimize(self, x0, f=None, df=None, f_df=None):
        """
        :param x0: initial point for a local optimizer.
        :param f: function to optimize.
        :param df: gradient of the function to optimize.
        :param f_df: returns both the function to optimize and its gradient.
        """
        # Based on the documentation of DIRECT, it does not seem we can pass through an initial point x0
        try:
            from DIRECT import solve

            def DIRECT_f_wrapper(f):
                def g(x, user_data):
                    return f(np.array([x])), 0

                return g

            lB = np.asarray(self.bounds)[:, 0]
            uB = np.asarray(self.bounds)[:, 1]
            x, _, _ = solve(DIRECT_f_wrapper(f), lB, uB, maxT=self.maxiter)
            return np.atleast_2d(x), f(np.atleast_2d(x))
        except ImportError:
            print(
                "Cannot find DIRECT library, please install it to use this option."
            )
Example #3
    def optimize(self, x0=None, f=None, df=None, f_df=None):
        """
        :param x0: initial point for a local optimizer.
        :param f: function to optimize.
        :param df: gradient of the function to optimize.
        :param f_df: returns both the function to optimize and its gradient.
        """
        try:
            from DIRECT import solve
            import numpy as np

            def DIRECT_f_wrapper(f):
                def g(x, user_data):
                    return f(np.array([x])), 0

                return g

            lB = np.asarray(self.space.get_bounds())[:, 0]
            uB = np.asarray(self.space.get_bounds())[:, 1]
            x, _, _ = solve(DIRECT_f_wrapper(f), lB, uB, maxT=self.maxiter)
            return np.atleast_2d(x), f(np.atleast_2d(x))
        except ImportError:
            print(
                "Cannot find DIRECT library, please install it to use this option."
            )
Example #4
def acq_max_direct(ac, gp, y_max, bounds):
    """
    A function to find the maximum of the acquisition function using
    the 'DIRECT' library.

    Input Parameters
    ----------
    ac: The acquisition function object that returns its point-wise value.
    gp: A Gaussian process fitted to the relevant data.
    y_max: The current maximum known value of the target function.
    bounds: The variables bounds to limit the search of the acq max.
    
    Returns
    -------
    x_max, The arg max of the acquisition function.
    """

    try:
        from DIRECT import solve
    except ImportError:
        print("Cannot find DIRECT library")

    def DIRECT_f_wrapper(ac):
        def g(x, user_data):
            return ac(np.array([x]), gp, y_max), 0

        return g

    lB = np.asarray(bounds)[:, 0]
    uB = np.asarray(bounds)[:, 1]

    # x, _, _ = solve(DIRECT_f_wrapper(ac), lB, uB, maxT=750, maxf=2000, volper=0.005)  # these limits speed up DIRECT (at some loss of precision)
    x, _, _ = solve(DIRECT_f_wrapper(ac), lB, uB)
    return np.reshape(x, len(bounds))
Example #5
def wrapper_DIRECT(f, bounds):
    '''
    Wrapper for the DIRECT optimization method. It works by iteratively partitioning the domain
    of the function. Only requires f and the box constraints to work.
    :param f: function to optimize, acquisition.
    :param bounds: tuple determining the limits of the optimizer.

    '''
    try:
        from DIRECT import solve
        import numpy as np

        def DIRECT_f_wrapper(f):
            def g(x, user_data):
                return f(np.array([x])), 0

            return g

        lB = np.asarray(bounds)[:, 0]
        uB = np.asarray(bounds)[:, 1]
        x, _, _ = solve(DIRECT_f_wrapper(f),
                        lB,
                        uB,
                        maxT=750,
                        maxf=2000,
                        volper=0.005)
        return np.reshape(x, len(bounds))
    except ImportError:
        print(
            "Cannot find DIRECT library, please install it to use this option."
        )
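
A hedged usage sketch for wrapper_DIRECT above; the toy acquisition and the bounds array are illustrative assumptions (DIRECT minimizes, so a real caller would typically pass the negated acquisition):

import numpy as np

def toy_acquisition(X):
    # Stand-in for an acquisition function evaluated at a (1, d) batch of points.
    return np.sum((X - 0.3) ** 2, axis=1)

bounds = np.array([[0.0, 1.0], [0.0, 1.0]])  # one (lower, upper) row per dimension
x_best = wrapper_DIRECT(toy_acquisition, bounds)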
Example #6
File: fitMilne.py Project: bxy8804/GPLines
    def optimizeGPDIRECT(self):
        """
		Optimize the marginal posterior of the GP to obtain the covariance and model parameters
		"""

        # Do a sensible initialization
        lower = np.zeros(self.nTotalPars)
        upper = np.zeros(self.nTotalPars)
        lower[0:self.nTotalParCovariance] = [
            np.log(1.0 / 3.0**2), np.log(1e-5**2)
        ]
        upper[0:self.nTotalParCovariance] = [
            np.log(1.0 / 0.5**2), np.log(1e-2**2)
        ]
        lower[self.nTotalParCovariance:] = [
            0, 0.0, 0.0, -5.0, 0.0, 0.0, 0.03, 0.0
        ]
        upper[self.nTotalParCovariance:] = [
            2000.0, 180.0, 180.0, 5.0, 2.0, 4.0, 0.3, 10.0
        ]
        x, fmin, ierror = solve(self.funcWrapperDIRECT,
                                lower,
                                upper,
                                volper=1e-15,
                                algmethod=1)
        self.optimalPars = x
        self.marginalLikelihood(self.optimalPars)
        return
Example #7
    def optimize_acq_f(self, n_iter=20, method='random'):
        self.logger.debug('optimizer for acq:{}'.format(method))
        self.logger.debug('n_iter={}'.format(n_iter))
        self.logger.debug('optninit={}'.format(self.config))
        if self.acq_name in ('ES', 'MES'):
            # information-theoretic acquisitions need an explicit update
            self.acquisition.update()

        def obj_LBFGS(x):
            return -self.acq_f(x)

        def obj_DIRECT(x, u):
            return -self.acq_f(x), 0

        x_tries = np.random.uniform(self.bounds[0, :],
                                    self.bounds[1, :],
                                    size=(self.config['optninit'],
                                          self.bounds.shape[1]))
        if method == 'random':
            x_seeds = np.random.uniform(self.bounds[0, :],
                                        self.bounds[1, :],
                                        size=(n_iter, self.bounds.shape[1]))
            ys = -obj_LBFGS(x_tries)  # evaluate the random warm-up points
            x_max = x_tries[ys.argmax()].reshape((1, -1))
            max_acq = ys.max()
            for x_try in x_seeds:
                res = minimize(obj_LBFGS,
                               x_try.reshape(1, -1),
                               bounds=self.reformat_bounds(self.bounds),
                               method='L-BFGS-B')
                if not res.success:
                    continue

                if max_acq is None or -res.fun[0] > max_acq:
                    x_max = res.x
                    max_acq = -res.fun[0]
        elif method == 'direct':
            x, _, _ = solve(obj_DIRECT,
                            self.bounds[0, :],
                            self.bounds[1, :],
                            maxf=1000,
                            logfilename=logfile)
            x = minimize(obj_LBFGS,
                         x,
                         bounds=self.reformat_bounds(self.bounds),
                         method='L-BFGS-B').x
            x_max = x
        else:
            raise NotImplementedError

        x_max = x_max.reshape((1, -1))
        self.logger.debug(
            'end optimizing acq_f, with x_max={}, self.acq_f(x_max)={}'.format(
                x_max, self.acq_f(x_max)))
        return np.clip(x_max, self.bounds[0, :], self.bounds[1, :]).reshape(
            (1, -1))
Example #8
    def maximize(self, model_predict: callable, lower_bound: np.ndarray,
                 upper_bound: np.ndarray):
        def acquisition_curve(x: float, dummy):
            # DIRECT.solve() calls this function with x and a dummy value
            _, uncertainty = model_predict(x[None])
            return -float(uncertainty), 0  # solve() expects a (value, flag) pair

        xopt, fopt, _ = solve(acquisition_curve,
                              lower_bound,
                              upper_bound,
                              maxT=self.maxT,
                              algmethod=self.algmethod)
        return xopt, fopt
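
Example #8 drives DIRECT with the model's predictive uncertainty alone (pure exploration). A hedged sketch of a compatible model_predict, using a GPy regression model; the training data and default kernel are illustrative assumptions:

import numpy as np
import GPy

X = np.random.rand(20, 2)
Y = np.sin(6.0 * X[:, :1]) + 0.05 * np.random.randn(20, 1)
model = GPy.models.GPRegression(X, Y)

def model_predict(x):
    # Returns (mean, variance), matching what acquisition_curve unpacks.
    return model.predict(np.atleast_2d(x))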
Example #9
File: gpLSD.py Project: postfix/pyGPLSD
	def optimizeDirectGP(self):
		"""
		Optimize the hyperparameters of the GP using the DIRECT optimization method
		
		Returns
		-------
		x: array of float
			Value of the optimal hyperparameters
		"""
		l = [0.05, np.log(np.min(self.variance) / self.nLines)]
		u = [4.0, np.log(10.0*np.max(self.variance) / self.nLines)]
		x, fmin, ierror = solve(self._objDirect, l, u, algmethod=1, maxf=300)
		print('Optimal lambdaGP={0}, sigmaGP={1} - loglambdaGP={2}, logsigmaGP={3} - logL={4}'.format(np.exp(x[0]), np.exp(x[1]), x[0], x[1], fmin))
		return x[0], x[1]
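
Examples #6, #9, and #14 search GP hyperparameters in log space so that a simple box constraint can span several orders of magnitude. A hedged sketch of that idea; the nll objective here is a toy stand-in for a negative log marginal likelihood:

import numpy as np
from DIRECT import solve

def nll(theta):
    # Toy stand-in for a negative log marginal likelihood.
    return np.sum((np.log(theta) + 1.0) ** 2)

def nll_wrapped(log_theta, user_data):
    theta = np.exp(log_theta)  # optimize in log space, evaluate in linear space
    return nll(theta), 0

l = np.log([1e-3, 1e-3])  # linear-space bounds of [1e-3, 1e2] per parameter
u = np.log([1e2, 1e2])
log_theta_opt, fmin, ierror = solve(nll_wrapped, l, u, algmethod=1, maxf=300)
theta_opt = np.exp(log_theta_opt)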
Example #10
    def predict_optimal(self, context):
        context = np.array([context])

        def DIRECT_obj(x, user_data):
            return -self.acq_f(np.concatenate((x, context)), alpha=0), 0

        x, fmin, ierror = solve(DIRECT_obj, self.lower_bounds,
                                self.upper_bounds)

        def LBFGS_obj(x):
            return -self.acq_f(np.concatenate((x, context)), alpha=0)
        bounds = [(self.lower_bounds[i], self.upper_bounds[i]) \
            for i in range(self.n_inputs)]
        res = minimize(LBFGS_obj, x, method='L-BFGS-B', bounds=bounds)
        return res.x
Example #11
    def predict_optimal(self, context):
        """
        Given a context, predict the optimizer
        :param context:
        :return: the optimizer
        """
        def obj_DIRECT(x, _):
            return -self.acq_f(np.concatenate((x, context)), alpha=0), 0

        def obj_LBFGS(x):
            return -self.acq_f(np.concatenate((x, context)), alpha=0)

        context = np.array([context])
        x, _, _ = solve(obj_DIRECT, self.bounds_lower, self.bounds_upper)
        res = minimize(obj_LBFGS, x, method='L-BFGS-B', bounds=self.bounds)
        return res.x
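
Examples #10 through #13 (and several snippets below) share the same two-stage pattern: a coarse global DIRECT search followed by an L-BFGS-B polish started from the DIRECT solution. A stripped-down sketch of that pattern on a toy function, with all names illustrative:

import numpy as np
from DIRECT import solve
from scipy.optimize import minimize

def f(x):
    return np.sum((x - 0.7) ** 2)

lower, upper = np.zeros(2), np.ones(2)

# Stage 1: global search; DIRECT's callback returns a (value, flag) pair.
x_global, _, _ = solve(lambda x, _: (f(x), 0), lower, upper, maxf=500)

# Stage 2: local refinement started from the DIRECT solution.
res = minimize(f, x_global, method='L-BFGS-B', bounds=list(zip(lower, upper)))
x_best = res.x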
Example #12
    def optimize_acq_f(self, context):
        def obj_sw_DIRECT(x, user_data):
            return -self.acq_f(x), 0

        def obj_sw_LBFGS(x_sw):
            return -self.acq_f(x_sw)

        x, _, _ = solve(obj_sw_DIRECT,
                        self.bounds_lower,
                        self.bounds_upper,
                        maxf=500)
        return np.array(
            minimize(obj_sw_LBFGS,
                     x,
                     method='L-BFGS-B',
                     bounds=self.reformat_bounds(self.bounds)).x)
Example #13
    def optimize_acq_f(self):
        def obj_sw_DIRECT(x, user_data):
            return -self.acq_f(x), 0

        def obj_sw_LBFGS(x_sw):
            return -self.acq_f(x_sw)

        x, _, _ = solve(obj_sw_DIRECT,
                        self.bounds_lower,
                        self.bounds_upper,
                        maxf=500)
        x = minimize(obj_sw_LBFGS,
                     x,
                     method='L-BFGS-B',
                     bounds=self.reformat_bounds(self.bounds)).x
        return np.array(x).reshape((1, self.num_inputs))
Example #14
	def optimizeGPDIRECT(self):
		"""
		Optimize the marginal posterior of the GP to obtain the covariance and model parameters
		"""
		
		# Do a sensible initialization
		lower = np.zeros(self.nTotalPars)
		upper = np.zeros(self.nTotalPars)
		lower[0:self.nTotalParCovariance] = [np.log(1.0/3.0**2),np.log(1e-5**2)]
		upper[0:self.nTotalParCovariance] = [np.log(1.0/0.5**2),np.log(1e-1**2)]
		lower[self.nTotalParCovariance:] = [0.0, 0.0, 0.0, 0.0]
		upper[self.nTotalParCovariance:] = [0.5, 0.2, 1.0, 2*np.pi]
		x, fmin, ierror = solve(self.funcWrapperDIRECT, lower, upper, volper=1e-13, algmethod=1)
		self.optimalPars = x
		self.marginalLikelihood(self.optimalPars)
		return
Example #15
File: Opt_old.py Project: FNTwin/GPGO
    def optimize(self, f, boundaries, max_iter=6000):
        def DIRECT_wrapper(f):
            def g(x, user_data):
                return -f(np.array([x])), 0

            return g

        lb = boundaries[:, 0]
        ub = boundaries[:, 1]
        #maxf= 80000
        x, val, _ = solve(DIRECT_wrapper(f),
                          lb,
                          ub,
                          maxT=max_iter,
                          algmethod=0)
        logger.info("DIRECT:", x, val)
        return x
Example #16
    def optimize(self, n_iterations):
        '''
        Iteratively optimizes the objective cost function and updates GP model.

        n_iterations:   Number of optimization iterations to run
        '''
        for _ in range(n_iterations):
            try:
                self.iterations += 1
                print('Iteration ' + str(self.iterations))
                context = self.context_space[self.iterations % \
                    len(self.context_space)]

                # DIRECT
                def DIRECT_obj(x, user_data):
                    return self.acq_f(np.concatenate([x, context], axis=0)), 0
                x, fmin, ierror = solve(DIRECT_obj, \
                    self.lower_bounds[:self.n_parameters], \
                    self.upper_bounds[:self.n_parameters], maxf=500)

                # L-BFGS-B
                def LBFGS_obj(x):
                    return self.acq_f(np.concatenate([x, context], axis=0))
                bounds = [(self.lower_bounds[i], self.upper_bounds[i]) \
                    for i in range(self.n_parameters)]
                res = minimize(LBFGS_obj, x, method='L-BFGS-B', bounds=bounds)

                # Evaluate new x with true objective function and re-train GP
                self.parameters = np.concatenate((self.parameters, \
                    np.reshape(res.x, (1,self.n_parameters))))
                self.contexts = np.concatenate((self.contexts, \
                    np.reshape(context, (1,self.n_contexts))))
                self.X = np.concatenate((self.parameters,self.contexts),axis=1)

                # Evaluating obj_f
                obj, info = self.obj_f(np.concatenate([res.x, context]))

                self.Y = np.concatenate((self.Y, [obj]))
                self.info = np.concatenate((self.info, [info]))

                self.train_GP(self.X, self.Y)
                self.model.optimize()
            except Exception:
                print('Exception encountered during optimization')
                traceback.print_exc()
Example #17
    def optimize_acq_f(self, x_cn):
        def obj_sw_DIRECT(x_uc, user_data):
            x_uc = np.array(x_uc).reshape((1, self.n_uc))
            return -self.acq_f(np.concatenate((x_uc, x_cn), axis=1)), 0

        def obj_sw_LBFGS(x_uc):
            x_uc = np.array(x_uc).reshape((1, self.n_uc))
            return -self.acq_f(np.concatenate((x_uc, x_cn), axis=1))

        x, _, _ = solve(obj_sw_DIRECT,
                        self.bounds_lower,
                        self.bounds_upper,
                        maxf=500)
        x = minimize(obj_sw_LBFGS,
                     x,
                     method='L-BFGS-B',
                     bounds=self.reformat_bounds(self.bounds)).x
        return np.array(x).reshape((1, self.n_uc))
Example #18
    def predict_optimal(self, context):
        '''
        Returns the predicted optimal parameters for a given context

        context:    A scalar value representing a specified context.
                    This value doesn't have to be in the given context_space
        '''
        def DIRECT_obj(x, user_data):
            return self.acq_f(np.concatenate((x, context)), use_mean=True), 0
        x, fmin, ierror = solve(DIRECT_obj, self.lower_bounds, \
            self.upper_bounds, maxf=1000)

        def LBFGS_obj(x):
            return self.acq_f(np.concatenate((x, context)), use_mean=True)
        bounds = [(self.lower_bounds[i], self.upper_bounds[i]) \
            for i in range(self.n_parameters)]
        res = minimize(LBFGS_obj, x, method='L-BFGS-B', bounds=bounds)
        return self.obj_f(np.concatenate([res.x, context]))[0]
Example #19
    def optimize(self, f, x0=None, df=None, f_df=None):
        from DIRECT import solve

        def f_direct(x, userdata):
            x = np.atleast_2d(x)
            return f(x) * self.cost(x), 0

        lower_bounds = [b[0] for b in self.bounds]
        upper_bounds = [b[1] for b in self.bounds]

        x, fmin, _ = solve(f_direct,
                           lower_bounds,
                           upper_bounds,
                           algmethod=1,
                           maxT=self.iters,
                           maxf=self.funcs)

        return np.atleast_2d(x), np.atleast_1d(fmin)
Example #20
    def find_next_sample(self):
        """ Find lcoation of next sample """

        # Optimization range:
        if self.prior_type == "normal":
            mean = self.prior_parameters['mean']
            cov = self.prior_parameters['cov']
            # TODO: Check if picking diag is OK
            lower_const = mean - 6.0 * np.sqrt(cov.diagonal())
            upper_const = mean + 6.0 * np.sqrt(cov.diagonal())

        # Wrap the optimization objective to use it within solve:
        def mod_opt_obj(X, self):
            return (self.opt_objective(X))

        # Optimize: search for new sample
        '''
        # For 1-dimensional input use grid search
        if (self.dim == 1):
            # Use grid:
            GRID_STEP = self.opt_parameters["grid_step"]
            # Generate grid:
            X_grid = np.arange(lower_const[0], upper_const[0], GRID_STEP)
            X_grid = to_column(X_grid)
            # Calculate objective:
            objective = np.apply_along_axis(self.opt_objective, 1, X_grid, False)
            objective = objective.tolist()
            
            # Pick X that maximizes the objective:
            max_ind = objective.index(min(objective)) # min since -cost         
            Xstar   = np.array([X_grid[max_ind]])    
        else:'''
        # Use DIRECT:
        kwargs = self.opt_parameters
        Xstar, _, _ = solve(mod_opt_obj,
                            lower_const,
                            upper_const,
                            user_data=self,
                            **kwargs)
        # Assign result:
        self.Xstar = to_row(Xstar)
        print("Predicted new sample (Xstar): " + str(Xstar))
Example #22
File: optimizer.py Project: jprk/GPyOpt
 def optimize(self, x0=None, f=None, df=None, f_df=None):
     """
     :param x0: initial point for a local optimizer.
     :param f: function to optimize.
     :param df: gradient of the function to optimize.
     :param f_df: returns both the function to optimize and its gradient.
     """
     try:
         from DIRECT import solve
         import numpy as np
         def DIRECT_f_wrapper(f):
             def g(x, user_data):
                 return f(np.array([x])), 0
             return g
         lB = np.asarray(self.space.get_bounds())[:,0]
         uB = np.asarray(self.space.get_bounds())[:,1]
         x,_,_ = solve(DIRECT_f_wrapper(f),lB,uB, maxT=self.maxiter)
         return np.atleast_2d(x), f(np.atleast_2d(x))
     except ImportError:
         print("Cannot find DIRECT library, please install it to use this option.")
Example #24
def minimize(func, bounds, maxiter=15000, n_restarts_optimizer=10):
    """
    # Arguments
        func: if `approx_grad`=0, `func` returns function values and gradients. Otherwise, it returns only function values. `
    
    Note: the backend minimizer evaluates `func` at each single input;thus `func` does not have to be able to perform sample-wise evaluation. 
    When `func` cannot be evaluated at a batch of samples, simply set `n_warmup` = 0. 
    """
    def real_func(x, user_data):
        return func(x), 0

    xmins = []
    fmins = []
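    # Note: DIRECT is deterministic, so every restart below re-runs an
    # identical search and returns the same (xmin, fmin).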
    for i in range(n_restarts_optimizer):
        xmin, fmin, _ =  solve(real_func, l=bounds[:,0], u=bounds[:,1], eps=1e-4, maxf=2000, \
                        maxT=6000 if maxiter is None else maxiter,
                        algmethod=0, fglobal=-1e100, fglper=0.01, volper=-1.0,
                        sigmaper=-1.0, logfilename='DIRresults.txt', user_data=None)
        xmins.append(xmin)
        fmins.append(fmin)
    ind = np.argmin(fmins)
    return xmins[ind], fmins[ind]
Example #25
File: Opt.py Project: FNTwin/GPGO
    def direct(self, boundaries, max_iter=3000):
        try:
            from DIRECT import solve
        except ImportError as exc:
            raise ImportError("To use the DIRECT optimization install the Python DIRECT wrapper") from exc

        def wrapper(f):
            def g(x, user_data):
                return -f(np.array([x]), user_data), 0

            return g

        if self.get_info("minimization"):
            best = np.min(self.get_Y())
        else:
            best = np.max(self.get_Y())
        lb = boundaries[:, 0]
        ub = boundaries[:, 1]
        # maxf= 80000
        #max_iter=6000,1000
        x, val, _ = solve(wrapper(self._acquistion.call), lb, ub, maxT=max_iter, user_data=best, algmethod=1)
        logger.info("DIRECT:", x, val)
        return np.atleast_2d(x)
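
Example #25 passes the incumbent best value to the objective through solve's user_data argument instead of a closure. A minimal sketch of that mechanism; the improvement-style objective is illustrative:

import numpy as np
from DIRECT import solve

def improvement(x, user_data):
    best = user_data  # payload forwarded untouched by solve
    return -(best - np.sum(x ** 2)), 0

x, fmin, ierror = solve(improvement, np.zeros(2), np.ones(2),
                        user_data=1.0, maxf=500)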
Example #26
    def optimize(self, n_iterations):
        for _ in range(n_iterations):
            try:
                self.iterations += 1
                print('Iteration ' + str(self.iterations))

                context = [self.context_space[self.context_index]]
                self.context_index = (self.context_index + 1) % \
                    len(self.context_space)

                # DIRECT
                def DIRECT_obj(x, user_data):
                    return -self.acq_f(np.concatenate((x, context))), 0
                x, fmin, ierror = solve(DIRECT_obj, self.lower_bounds, \
                    self.upper_bounds, maxf=500)

                # L-BFGS-B
                def LBFGS_obj(x):
                    return -self.acq_f(np.concatenate((x, context)))
                bounds = [(self.lower_bounds[i], self.upper_bounds[i]) \
                    for i in range(self.n_inputs)]
                res = minimize(LBFGS_obj, x, method='L-BFGS-B', bounds=bounds)

                self.parameters = np.concatenate((self.parameters, \
                    np.reshape(res.x, (1,self.n_inputs))))
                self.contexts = np.concatenate((self.contexts, \
                    np.reshape(context, (1,1))))
                self.X = np.concatenate((self.parameters, self.contexts),
                                        axis=1)
                self.Y = np.concatenate((self.Y, \
                    [self.obj_f(np.concatenate((res.x, context)))]))

                self.train_GP(self.X, self.Y)
                self.model.optimize()
            except Exception:
                print('Exception encountered during optimization')
                traceback.print_exc()
Example #27
    thPart = th[0:kk + 1, :]
    pSampPart = pSamp[0:kk + 1]
    m = GPy.models.GPRegression(thPart, pSampPart, kernel)

    if ((np.remainder(kk, 10) == 0) & (kk > par.preIter)):
        m.optimize()
        m.optimize_restarts(num_restarts=10)

    # Find the maximum expected value
    Mup, ys2, up95, lo95 = m.predict(np.array(thPart))
    mumax[kk] = np.max(Mup)

    # Compute the next point in which to sample the posterior
    x, fmin, ierror = solve(EIeval,
                            l,
                            u,
                            user_data=(m, mumax[kk], par.epsilonEI),
                            maxf=1000,
                            maxT=1000)

    # Set the new point and save the estimate of the EI
    th[kk + 1, :] = x
    EI[kk + 1] = fmin

    print(kk)

# Estimate the argument that maximises the expected information matrix
muhat, llmax, ierror = solve(MUeval, l, u, user_data=m)

# Evaluate the surrogate function over a grid
xx = np.arange(0.00, 1.01, 0.01)
yy = np.arange(0.00, 1.01, 0.01)
Example #28
    def gpo(self, sm, sys, thSys):

        #=====================================================================
        # Initialisation
        #=====================================================================

        # Set initial settings
        sm.calcGradientFlag = False
        sm.calcHessianFlag = False
        self.nPars = thSys.nParInference
        self.filePrefix = thSys.filePrefix
        runNextIter = True
        self.iter = 0

        # Check algorithm settings and set to default if needed
        setSettings(self, "gpo")

        # Make a grid to evaluate the EI on
        l = np.array(self.lowerBounds[0:thSys.nParInference], dtype=np.float64)
        u = np.array(self.upperBounds[0:thSys.nParInference], dtype=np.float64)

        # Allocate vectors
        AQ = np.zeros((self.maxIter + 1, 1))
        mumax = np.zeros((self.maxIter + 1))
        thp = np.zeros((self.maxIter + 1, self.nPars))
        obp = np.zeros((self.maxIter + 1, 1))
        thhat = np.zeros((self.maxIter, self.nPars))
        thhatHessian = np.zeros((self.maxIter, self.nPars, self.nPars))
        obmax = np.zeros((self.maxIter + 1, 1))
        hyperParams = np.zeros((self.maxIter, 3 + self.nPars))
        xhatf = np.zeros((self.maxIter + 1, sys.T))

        #=====================================================================
        # Pre-run using random sampling to estimate hyperparameters
        #=====================================================================

        # Pre allocate vectors
        thPre = np.zeros((self.preIter, self.nPars))
        obPre = np.zeros((self.preIter, 1))

        #=====================================================================
        # Main loop
        #=====================================================================

        # Pre-compute hypercube points if required
        if (self.preSamplingMethod == "latinHyperCube"):
            lhd = lhs(self.nPars, samples=self.preIter)

        for kk in range(0, self.preIter):

            # Sampling parameters using uniform sampling or Latin hypercubes
            if (self.preSamplingMethod == "latinHyperCube"):
                # Sample parameters using a Latin hypercube over the parameter
                # bounds
                thPre[kk, :] = l + (u - l) * lhd[kk, :]
            elif (self.preSamplingMethod == "sobol"):
                # Sample parameters using a Sobol sequence over the parameter
                # bounds
                thPre[kk, :] = l + (u - l) * i4_sobol(self.nPars, 100 + kk)[0]
            else:
                # Draw parameters uniform over the parameter bounds
                thPre[kk, :] = l + (u - l) * np.random.random(self.nPars)

            # Evaluate the objective function in the parameters
            thSys.storeParameters(thPre[kk, :], sys)
            obPre[kk], tmp1 = self.evaluateObjectiveFunction(sm, sys, thSys)

            # Transform and save the parameters
            thSys.transform()
            thPre[kk, :] = thSys.returnParameters()[0:thSys.nParInference]

            # Write out progress if requested
            if (self.verbose):
                print("gpo: Pre-iteration: " + str(kk) + " of " +
                      str(self.preIter) + " completed, sampled " +
                      str(np.round(thPre[kk, :], 3)) + " with " +
                      str(np.round(obPre[kk], 2)) + ".")

        #=====================================================================
        # Fit the GP regression
        #=====================================================================

        # Remove nan values for the objective function
        idxNotNaN = ~np.isnan(obPre)
        thPre = thPre[(idxNotNaN).any(axis=1)]
        obPre = obPre[(idxNotNaN).any(axis=1)]

        # Specify the kernel ( Matern52 with ARD plus bias kernel to compensate
        # for non-zero mean )
        kernel = GPy.kern.Matern52(
            input_dim=self.nPars,
            ARD=True) + GPy.kern.Bias(input_dim=self.nPars)

        # Normalize the objective function evaluations
        ynorm = (obPre - np.mean(obPre)) / np.sqrt(np.var(obPre))

        # Create the model object
        m = GPy.models.GPRegression(thPre, ynorm, kernel, normalizer=False)

        #=====================================================================
        # Update hyperparameters
        #=====================================================================

        # Set constraints on hyperparameters
        m.Gaussian_noise.variance.constrain_bounded(0.01, 10.0)
        m.kern.Mat52.lengthscale.constrain_bounded(0.01, 10.0)
        m.kern.Mat52.variance.constrain_bounded(0.01, 25.0)

        # Run empirical Bayes to estimate the hyperparameters
        m.optimize('bfgs', max_iters=200)
        m.optimize_restarts(num_restarts=10, robust=True)
        self.GaussianNoiseVariance = np.array(m.Gaussian_noise.variance,
                                              copy=True)

        #=====================================================================
        # Write to output
        #=====================================================================

        self.thPre = thPre
        self.obPre = obPre
        self.m = m

        #=====================================================================
        # Main loop
        #=====================================================================

        # Save the initial parameters
        thSys.storeParameters(self.initPar, sys)
        thp[self.iter, :] = thSys.returnParameters()
        thSys.transform()

        while (runNextIter):

            # Store the parameter
            thSys.storeParameters(thp[self.iter, :], sys)
            thSys.transform()

            #------------------------------------------------------------------
            # Evaluate the objective function
            #------------------------------------------------------------------
            obp[self.iter], xhatf[
                self.iter, :] = self.evaluateObjectiveFunction(sm, sys, thSys)

            # Collect the sampled data (if the objective is finite)
            idxNotNaN = ~np.isnan(obp[range(self.iter), :])
            x = np.vstack((thPre, thp[(idxNotNaN).any(axis=1)]))
            y = np.vstack((obPre, obp[(idxNotNaN).any(axis=1)]))

            #------------------------------------------------------------------
            # Fit the GP to the sampled data
            #------------------------------------------------------------------
            ynorm = (y - np.mean(y)) / np.sqrt(np.var(y))
            self.ynormMean = np.mean(y)
            self.ynormVar = np.var(y)

            m = GPy.models.GPRegression(x, ynorm, kernel, normalizer=False)

            #------------------------------------------------------------------
            # Re-estimate the hyperparameters
            #------------------------------------------------------------------
            if (np.remainder(self.iter + 1,
                             self.EstimateHyperparametersInterval) == 0):

                # Set constraints on hyperparameters
                m.Gaussian_noise.variance.constrain_bounded(0.01, 10.0)
                m.kern.Mat52.lengthscale.constrain_bounded(0.01, 10.0)
                m.kern.Mat52.variance.constrain_bounded(0.01, 25.0)

                # Run empirical Bayes to estimate the hyperparameters
                m.optimize('bfgs', max_iters=200)
                m.optimize_restarts(num_restarts=10, robust=True)

                # Save the current noise variance
                self.GaussianNoiseVariance = np.array(
                    m.Gaussian_noise.variance, copy=True)

            else:

                # Overload current noise estimate (sets to 1.0 every time we
                # add data otherwise)
                m.Gaussian_noise.variance = self.GaussianNoiseVariance

            # Save all the hyperparameters
            hyperParams[self.iter, 0] = np.array(m.Gaussian_noise.variance,
                                                 copy=True)
            hyperParams[self.iter, 1] = np.array(m.kern.bias.variance,
                                                 copy=True)
            hyperParams[self.iter, 2] = np.array(m.kern.Mat52.variance,
                                                 copy=True)
            hyperParams[self.iter, range(3, 3 + self.nPars)] = np.array(
                m.kern.Mat52.lengthscale, copy=True)

            #------------------------------------------------------------------
            # Find the maximum expected value of the GP over the sampled parameters
            #------------------------------------------------------------------
            Mup, ys2 = m.predict(x)
            mumax[self.iter] = np.max(Mup)

            #------------------------------------------------------------------
            # Compute the next point in which to sample the posterior
            #------------------------------------------------------------------

            # Optimize the AQ function
            aqThMax, aqMax, ierror = solve(self.AQfunction,
                                           l,
                                           u,
                                           user_data=(m, mumax[self.iter],
                                                      self.epsilon),
                                           maxf=1000,
                                           maxT=1000)

            # Jitter the parameter estimates
            if (self.jitterParameters == True):
                flag = 0.0

                while (flag == 0.0):
                    z = np.random.multivariate_normal(
                        np.zeros(self.nPars), self.jitteringCovariance[
                            range(self.nPars), :][:, range(self.nPars)])
                    flag = self.checkProposedParameters(aqThMax + z)

                thSys.storeParameters(aqThMax + z, sys)
                aqThMax += z

            # Set the new point and save the estimate of the AQ
            thp[self.iter + 1, :] = aqThMax
            AQ[self.iter + 1] = -aqMax

            # Update counter
            self.iter += 1

            #------------------------------------------------------------------
            # Check exit conditions
            #------------------------------------------------------------------

            # AQ function criteria
            if (AQ[self.iter] < self.tolLevel):
                print("GPO: reaches tolLevel, so exiting...")
                runNextIter = False

            # Max iteration criteria
            if (self.iter == self.maxIter):
                print("GPO: reaches maxIter, so exiting...")
                runNextIter = False

            #------------------------------------------------------------------
            # Estimate the current parameters by maximizing the GP
            #------------------------------------------------------------------
            if ((self.EstimateThHatEveryIteration == True) |
                (runNextIter == False)):
                thhatCurrent, obmaxCurrent, ierror = solve(self.MUeval,
                                                           l,
                                                           u,
                                                           user_data=m,
                                                           algmethod=1,
                                                           maxf=1000,
                                                           maxT=1000)

                thhat[self.iter - 1, :] = thhatCurrent
                obmax[self.iter - 1, :] = obmaxCurrent

                print((thhatCurrent, obmaxCurrent))

                if (self.EstimateHessianEveryIteration == True):
                    self.estimateHessian(thhatCurrent)
                    thhatHessian[self.iter - 1, :, :] = self.invHessianEstimate

            #------------------------------------------------------------------
            # Print output to console
            #------------------------------------------------------------------
            if (self.verbose):
                if (self.EstimateThHatEveryIteration == True):
                    parm = ["%.4f" % v for v in thhat[self.iter - 1, :]]
                    print(
                        "##############################################################################################"
                    )
                    print("Iteration: " + str(self.iter) +
                          " with current parameters: " + str(parm) +
                          " and AQ: " + str(np.round(AQ[self.iter], 2)))
                    print(
                        "##############################################################################################"
                    )
                else:
                    parm = ["%.4f" % v for v in thp[self.iter - 1, :]]
                    print(
                        "##############################################################################################"
                    )
                    print("Iteration: " + str(self.iter) +
                          " sampled objective function at parameters: " +
                          str(parm) + " with value: " +
                          str(np.round(obp[self.iter - 1], 2)))
                    print(
                        "##############################################################################################"
                    )

        #=====================================================================
        # Generate output
        #=====================================================================
        tmp = range(self.iter - 1)
        self.ob = obmax[tmp]
        self.th = thhat[tmp, :]
        self.thhat = thhat[self.iter - 1, :]
        self.thHessian = thhatHessian
        self.thhatHessian = thhatHessian[self.iter - 1, :, :]
        self.aq = AQ[range(self.iter)]
        self.obp = obp[tmp]
        self.thp = thp[range(self.iter), :]
        self.m = m
        self.x = x
        self.y = y
        self.xhatf = xhatf
        self.ynorm = ynorm
        self.hp = hyperParams
Example #29
def minimize(func,
             bounds,
             approx_grad=0,
             maxiter=15000,
             n_warmup=100000,
             method='lbfgs',
             n_restarts_optimizer=10,
             initializer=None,
             x_init=None,
             random_state=None):
    """
    # Arguments
        func: if `approx_grad`=0, `func` returns function values and gradients. Otherwise, it returns only function values. `
    
    Note: the backend minimizer evaluates `func` at each single input;thus `func` does not have to be able to perform sample-wise evaluation. 
    When `func` cannot be evaluated at a batch of samples, simply set `n_warmup` = 0. 
    """
    if initializer is None:
        initializer = random_sampler
    if method == 'lbfgs':
        if n_warmup > 0:
            X0 = latin_hypercube_sampler(n_warmup, bounds.shape[0], bounds,
                                         random_state)
            if approx_grad:
                ys = func(X0)
            else:
                ys, _ = func(X0)
            xmin = X0[ys.argmin()]
            fmin = ys.min()
        else:
            xmin = None
            fmin = np.inf
            # Actual optimize
        X0 = latin_hypercube_sampler(n_restarts_optimizer, bounds.shape[0],
                                     bounds, random_state)
        if n_warmup > 0:
            X0 = np.vstack((xmin.reshape(1, -1), X0))
        if x_init is not None:
            X0 = np.vstack((x_init.reshape(1, -1), X0))
        for j, x0 in enumerate(X0):
            if maxiter is not None:
                _xmin, _fmin, d = scipy.optimize.fmin_l_bfgs_b(
                    func,
                    x0.reshape(1, -1),
                    approx_grad=approx_grad,
                    bounds=bounds,
                    maxiter=maxiter)
            else:
                _xmin, _fmin, d = scipy.optimize.fmin_l_bfgs_b(
                    func,
                    x0.reshape(1, -1),
                    approx_grad=approx_grad,
                    bounds=bounds)

            if _fmin < fmin:
                fmin = _fmin
                xmin = _xmin
        return xmin, fmin
    elif method == 'DIRECT':  # `func` returns function values only.

        def real_func(x, user_data):
            return func(x), 0
        xmin, fmin, _ =  solve(real_func, l=bounds[:,0], u=bounds[:,1], eps=1e-4, maxf=2000, \
                        maxT=6000 if maxiter is None else maxiter,
                        algmethod=0, fglobal=-1e100, fglper=0.01, volper=-1.0,
                        sigmaper=-1.0, logfilename='DIRresults.txt', user_data=None)
        return xmin, fmin

    else:
        raise NotImplementedError('Not recognized %s' % (method))
Example #30
File: C6.py Project: eric-vader/pydirect
def obj(x, user_data):
    x1 = x[0]
    x2 = x[1]

    f = (4 - 2.1 * (x1 * x1) +
         (x1 * x1 * x1 * x1) / 3.0) * (x1 * x1) + x1 * x2 + (-4 + 4 *
                                                             (x2 * x2)) * (x2 *
                                                                           x2)
    return f, 0


if __name__ == '__main__':

    l = [-3, -2]
    u = [3, 2]

    x, fmin, ierror = solve(obj, l, u)

    print('Optimal point:', x)
    print('Optimal value:', fmin)
    print('Exit status:', ierror)

    #
    # Plot the results.
    #
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    X, Y = np.mgrid[x[0] - 1:x[0] + 1:50j, x[1] - 1:x[1] + 1:50j]
    Z = np.zeros_like(X)

    for i in range(X.size):
Example #31
from DIRECT import solve
import numpy as np

trans = np.array([-1, 2, -4, 3])


def func(x, user_data):
    x = x - trans  # avoid mutating the array the optimizer passes in
    return np.dot(x, x), 0


if __name__ == '__main__':

    l = np.array([-10, -10, -10, -10], dtype=np.float64)
    u = np.array([10, 10, 10, 10], dtype=np.float64)

    x, fmin, ierror = solve(func, l, u)

    print('Optimal point:', x)
    print('Optimal value:', fmin)
    print('Exit status:', ierror)
Example #32
    thP = (th[kk, :], sys.par[1], sys.par[2], sys.par[3])
    thSys.storeParameters(thP, sys, par)
    smc.faPF(data, thSys, par)
    pSamp[kk] = smc.ll

    # Fit the GP
    thPart = th[0 : kk + 1, :]
    pSampPart = pSamp[0 : kk + 1]
    m = GPy.models.GPRegression(thPart, pSampPart, kernel)

    # Find the maximum expected value
    Mup, ys2, up95, lo95 = m.predict(np.array(thPart))
    mumax[kk] = np.max(Mup)

    # Compute the next point in which to sample the posterior
    x, fmin, ierror = solve(EIeval, l, u, user_data=(m, mumax[kk], par.epsilonEI), maxf=1000, maxT=1000)

    # Set the new point and save the estimate of the EI
    th[kk + 1, :] = x
    EI[kk + 1] = fmin

    tmp2, tmp3, ierror = solve(MUeval, l, u, user_data=m, algmethod=1)
    thhat[kk, :] = tmp2
    llmax[kk, :] = tmp3

    user_data = (m, mumax[kk], par.epsilonEI)
    Mup, ys2, up95, lo95 = m.predict(np.array(grid.reshape((len(grid), 1))))

    for ii in range(0, len(grid)):
        gridA[ii, kk], tmp = EIeval(np.array(grid[ii]), user_data)
Example #33
	def optimizeDIRECT(self):		
		x, fmin, ierror = solve(self.logLike, self.lower, self.upper, volper=1e-10, algmethod=1)
Example #34
File: SH.py Project: npinto/pydirect
def obj(x, user_data):
    j = np.arange(1, 6)
    
    tmp1 = np.dot(j, np.cos((j+1)*x[0] + j))
    tmp2 = np.dot(j, np.cos((j+1)*x[1] + j))
    
    return tmp1 * tmp2, 0


if __name__ == '__main__':

    l = [-10, -10]
    u = [10, 10]

    x, fmin, ierror = solve(
                        obj,
                        l,
                        u
                        )

    print('Optimal point:', x)
    print('Optimal value:', fmin)
    print('Exit status:', ierror)
    
    #
    # Plot the results.
    #
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    X, Y = np.mgrid[x[0]-2:x[0]+2:50j, x[1]-2:x[1]+2:50j]
    Z = np.zeros_like(X)
    
Example #36
    def integrate(self, m, P, fun, *args):
        ''' Input:
            m - column vector
            P - matrix
            Output:
            x - column vector
            Variables:
            X, Y - matrix of row vectors
            z - column vector
            m, x, mu - row vector
        ''' 
        
        # Initial sample and fitted GP:
        m = to_row(m)
        X = m
        Y = np.apply_along_axis(fun, 1, X, *args)
        gp = self.gp_fit(X,Y)
        
        # Optimization constraints:
        N_SIGMA = 3
        P_diag = np.sqrt(np.diag(P))
        lower_const = (m - N_SIGMA*P_diag)[0].tolist()
        upper_const = (m + N_SIGMA*P_diag)[0].tolist()
        
        # Perform sampling
        for i in range(0, self.N_SAMPLES):
            # Set extra params to pass to optimizer
            user_data  = {"gp":gp, "m":m, "P":P, "grid_search": False}
            
            if (X.shape[1] == 1):
                ''' GRID SEARCH: when we are in 1 dimension '''
                user_data['grid_search'] = True
                X_grid = np.linspace(lower_const[0], upper_const[0], self.opt_par["GRID_SIZE"])
                X_grid = to_column(X_grid)
                
                objective = self.optimization_objective(X_grid, user_data)

                max_ind = objective.index(max(objective))                
                x_star = np.array([X_grid[max_ind]])    
            else:
                ''' OPTIMIZATION: for higher dimensions '''
                x_star, _, _ = solve(self.optimization_objective, lower_const, upper_const, 
                                 user_data=user_data, algmethod = 1, 
                                 maxT = self.opt_par["MAX_T"], 
                                 maxf = self.opt_par["MAX_F"])
            
            x_star = to_row(x_star)                           
            X      = np.vstack((X, x_star))
            Y      = np.apply_along_axis(fun, 1, X, *args)
            gp     = self.gp_fit(X, Y)    
        # Reoptimize GP:                             
        # TODO: Remove unique rows:
        if (len(unique_rows(X)) != len(X)):
            print("Removing duplicated rows")
            X  = unique_rows(X)
            Y  = np.apply_along_axis(fun, 1, X, *args)
            gp = self.gp_fit(X, Y)             
            
        # Compute integral
        # Fitted GP parameters      
        w_0 = gp.rbf.variance.tolist()[0]
        w_d = np.power(gp.rbf.lengthscale.tolist(), 2)
        
        # Prior parameters
        A = np.diag(w_d)
        I = np.eye(self.X_DIM)     
        
        # Compute weights
        z = [self.compute_z(x, A, m, P, I, w_0) for x in X]
        z = to_column(np.array(z))
        K = gp.kern.K(X); K = (K.T + K)/2.0
        W = (mo.mrdivide(z.T, K).squeeze()).tolist()
        
        # Compute mean, covariance and cross-cov
        mu_ = (z.T).dot( mo.mldivide(K, Y) )
        mu_ = to_row(mu_)
        
        # Initialize Sigma and CC
        Sigma_ = CC_ = None     
        
        # TODO: Sigma computation as closed form
        # Seems to cause problems!
        # Sigma_ = w_0/np.sqrt(np.linalg.det( 2*mo.mldivide(A,P) + I) ) - (z.T).dot(mo.mldivide(K,z))
        
        # Compute cov matrix and cross cov matrix
        for i in range(0,len(W)):
        
            # TODO: Sigma computation as summation:
            # Seems to work better for multidimensional problems and doesn't
            # cause problems (might be slower though):
            YY_i   = ( to_column(Y[i]-mu_) ).dot( to_row(Y[i]-mu_) )
            Sigma_ = W[i] * YY_i if i == 0 else Sigma_ + W[i] * YY_i
            
            XY_i = ( to_column(X[i]-m) ).dot( to_row(Y[i]-mu_) )
            CC_  = W[i] * XY_i if i == 0 else CC_ + W[i] * XY_i
        
        mu_    = to_column(mu_)
        Sigma_ = symetrize_cov(Sigma_)
        
        # Return results
        return(mu_, Sigma_, CC_)
Example #37
    def gpo(self,sm,sys,thSys):

        #=====================================================================
        # Initialisation
        #=====================================================================

        # Set initial settings
        sm.calcGradientFlag = False;
        sm.calcHessianFlag  = False;
        self.nPars          = thSys.nParInference;
        self.filePrefix     = thSys.filePrefix;
        runNextIter         = True;
        self.iter           = 0;

        # Check algorithm settings and set to default if needed
        setSettings(self,"gpo");

        # Make a grid to evaluate the EI on
        l      = np.array(self.lowerBounds[0:thSys.nParInference], dtype=np.float64)
        u      = np.array(self.upperBounds[0:thSys.nParInference], dtype=np.float64)

        # Allocate vectors
        AQ    = np.zeros((self.maxIter+1,1))
        mumax = np.zeros((self.maxIter+1))
        thp   = np.zeros((self.maxIter+1,self.nPars))
        obp   = np.zeros((self.maxIter+1,1))
        thhat = np.zeros((self.maxIter,self.nPars))
        obmax = np.zeros((self.maxIter+1,1))

        #=====================================================================
        # Specify the GP regression model
        #=====================================================================

        # Load the GP regression model
        m           = pyGPs.GPR()

        # Specify the GP prior
        priorMean   = pyGPs.mean.Zero();
        #priorKernel = pyGPs.cov.Matern(d=5) + pyGPs.cov.Const();
        priorKernel = pyGPs.cov.RBFard(D=self.nPars) + pyGPs.cov.Const();

        m.setPrior(mean=priorMean, kernel=priorKernel)

        # Setup the optimization routine
        m.setOptimizer('Minimize', num_restarts=10)

        #=====================================================================
        # Pre-run using random sampling to estimate hyperparameters
        #=====================================================================

        # Pre allocate vectors
        thPre = np.zeros((self.preIter,self.nPars));
        obPre = np.zeros((self.preIter,1));

        #=====================================================================
        # Main loop
        #=====================================================================
        for kk in range(0,self.preIter):

            # Sample parameters

            #if (self.optType == "MAPparameterEstimation"):
            # Sample prior
            #thPre[kk,:] = thSys.samplePrior()
            #else:
            # Draw parameters uniform over the parameter bounds
            thPre[kk,:]  =  l + (u-l) * np.random.random( self.nPars )

            # Evaluate the objective function in the parameters
            thSys.storeParameters(thPre[kk,:],sys);
            obPre[kk] = self.evaluateObjectiveFunction( sm, sys, thSys );

            # Transform and save the parameters
            thSys.transform();
            thPre[kk,:] = thSys.returnParameters()[0:thSys.nParInference];

            # Write out progress if requested
            if (self.verbose):
                print("gpo: Pre-iteration: " + str(kk) + " of " + str(self.preIter) + " completed, sampled " + str(thPre[kk,:]) + " with " + str(obPre[kk]) + ".")

        #=====================================================================
        # Fit the GP regression
        #=====================================================================

        # Remove nan values for the objective function
        idxNotNaN = ~np.isnan( obPre );
        thPre     = thPre[(idxNotNaN).any(axis=1)];
        obPre     = obPre[(idxNotNaN).any(axis=1)];

        # Normalize the objective function evaluations
        ynorm = ( obPre - np.mean(obPre) ) / np.sqrt( np.var(obPre) )

        # Optimize the Hyperparameters
        m.optimize(thPre, ynorm)
        #yp = m.predict(np.arange(0.01,1.00,0.01))
        #m.plot()

        #=====================================================================
        # Write to output
        #=====================================================================

        self.thPre  = thPre;
        self.obPre  = obPre;
        self.m      = m;

        #=====================================================================
        # Main loop
        #=====================================================================

        # Save the initial parameters
        thSys.storeParameters(self.initPar,sys);
        thp[self.iter,:]  = thSys.returnParameters();
        thSys.transform();

        while ( runNextIter ):

            # Store the parameter
            thSys.storeParameters(thp[self.iter,:],sys);
            thSys.transform();

            #------------------------------------------------------------------
            # Evaluate the objective function
            #------------------------------------------------------------------
            obp[self.iter] = self.evaluateObjectiveFunction( sm, sys, thSys );

            # Collect the sampled data (dropping non-finite objective values,
            # so that x and y are always defined before the GP fit below)
            idxFinite = np.isfinite(obp[range(self.iter),:]);
            x = np.vstack( (thPre,thp[range(self.iter),:][idxFinite.any(axis=1)]) );
            y = np.vstack( (obPre,obp[range(self.iter),:][idxFinite.any(axis=1)]) );

            # Normalize the objective function evaluations
            ynorm = ( y - np.mean(y) ) / np.sqrt( np.var(y) )

            #------------------------------------------------------------------
            # Fit the GP to the sampled data
            #------------------------------------------------------------------

            # Optimize the hyperparameters if needed
            #if ( np.remainder(self.iter+1,self.EstimateHyperparametersInterval) == 0):
            #    m.optimize(x, ynorm)
            #else:
            #    m.setData(x, ynorm)

            m.optimize(x, ynorm)

            # Extract the posterior values of the hyperparameters
            post = m.posterior;

            #------------------------------------------------------------------
            # Find the maximum expected value of the GP over the sampled parameters
            #------------------------------------------------------------------
            Mup, ys2, fmu, fs2, lp = m.predict_with_posterior(post, np.array( np.vstack( (thPre,thp[range(self.iter),:]) ) ) )
            mumax[self.iter] = np.max( Mup );

            #------------------------------------------------------------------
            # Compute the next point in which to sample the posterior
            #------------------------------------------------------------------

            # Optimize the AQ function
            aqThMax, aqMax, ierror = solve(self.AQfunction,l,u,user_data=(m,mumax[self.iter],self.epsilon,post),maxf=1000,maxT=1000);

            # Jitter the parameter estimates
            if ( self.jitterParameters == True ):
                aqThMax += np.random.multivariate_normal( np.zeros(self.nPars), self.jitteringCovariance[range(self.nPars),:][:,range(self.nPars)] );

            # Set the new point and save the estimate of the AQ
            thp[self.iter+1,:] = aqThMax;
            AQ[self.iter+1]    = -aqMax;

            # Update counter
            self.iter += 1;

            #------------------------------------------------------------------
            # Check exit conditions
            #------------------------------------------------------------------

            # AQ function criteria
            if ( AQ[self.iter] < self.tolLevel ):
                print("GPO: reaches tolLevel, so exiting...")
                runNextIter = False;

            # Max iteration criteria
            if ( self.iter == self.maxIter ):
                print("GPO: reaches maxIter, so exiting...")
                runNextIter = False;

            #------------------------------------------------------------------
            # Estimate the current parameters by maximizing the GP
            #------------------------------------------------------------------

            if ( ( self.EstimateThHatEveryIteration == True ) | ( runNextIter == False ) ):
                thhatCurrent, obmaxCurrent, ierror = solve(self.MUeval,l,u,user_data=(m,post),algmethod=1);
                thhat[self.iter-1,:] = thhatCurrent;
                obmax[self.iter-1,:] = obmaxCurrent;

            #------------------------------------------------------------------
            # Print output to console
            #------------------------------------------------------------------
            if ( self.verbose ):
                if ( self.EstimateThHatEveryIteration == True ):
                    parm = ["%.4f" % v for v in thhat[self.iter-1,:]];
                    print("##############################################################################################")
                    print("Iteration: " + str(self.iter) + " with current parameters: " + str(parm) + " and AQ: " + str(AQ[self.iter]) )
                    print("##############################################################################################")
                else:
                    parm = ["%.4f" % v for v in thp[self.iter-1,:]];
                    print("##############################################################################################")
                    print("Iteration: " + str(self.iter) + " sampled objective function at parameters: " + str(parm) + " with value: " + str(obp[self.iter-1]) )
                    print("##############################################################################################")


        #=====================================================================
        # Generate output
        #=====================================================================
        tmp         = range(self.iter-1);
        self.ob     = obmax[tmp];
        self.th     = thhat[tmp,:];
        self.thhat  = thhat[self.iter-1,:]
        self.aq     = AQ[range(self.iter)];
        self.obp    = obp[tmp];
        self.thp    = thp[range(self.iter),:];

        self.m      = m;
        self.x      = x;
        self.y      = y;
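
The `AQfunction` callback handed to `solve` above is not part of this snippet. A minimal sketch of an expected-improvement acquisition that fits the observed call signature (the four-element `user_data` tuple and the pyGPs-style `predict_with_posterior` used earlier in the loop) might look as follows; the EI form and the use of `epsilon` as an exploration margin are assumptions, not the author's exact code.

import numpy as np
from scipy.stats import norm

def AQfunction(x, user_data):
    # Unpack the tuple passed via user_data in the solve() call above
    m, mumax, epsilon, post = user_data
    ymu, ys2, fmu, fs2, lp = m.predict_with_posterior(post, np.array([x]))
    sigma = np.sqrt(np.maximum(fs2, 1e-12))
    # Expected improvement over the current best posterior mean
    z = (fmu - mumax - epsilon) / sigma
    ei = (fmu - mumax - epsilon) * norm.cdf(z) + sigma * norm.pdf(z)
    # DIRECT minimises, so return the negated acquisition and the flag 0
    return -float(ei), 0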
Example #38
    def optimize_acq_f(self, n_iter=20, method='random'):
        self.logger.debug("optimizer for acq: {}".format(method))
        self.logger.debug("n_iter={}".format(n_iter))
        self.logger.debug("optninit={}".format(self.config))
        self.acquisition.update()  #especially for information-theory based acq

        def obj_LBFGS(x):
            #self.logger.debug('input to obj_LBFGS={}'.format(x))
            return -self.acq_f(x)

        def obj_DIRECT(x, u):
            return -self.acq_f(x), 0

        x_tries = np.random.uniform(self.bounds[0, :],
                                    self.bounds[1, :],
                                    size=(self.config['optninit'],
                                          self.bounds.shape[1]))
        if method == 'random':
            x_seeds = np.random.uniform(self.bounds[0, :],
                                        self.bounds[1, :],
                                        size=(n_iter, self.bounds.shape[1]))
            ys = -obj_LBFGS(x_tries)
            x_max = x_tries[ys.argmax()].reshape((1, -1))
            max_acq = ys.max()
            self.logger.debug('max_acq from random={}'.format(max_acq))
            for x_try in x_seeds:
                # Find the minimum of minus the acquisition function

                res = minimize(obj_LBFGS,
                               x_try.reshape(1, -1),
                               bounds=self.reformat_bounds(self.bounds),
                               method="L-BFGS-B")
                if not res.success:
                    self.logger.debug(
                        'minimize failed; trying another random initialisation from x_seeds'
                    )
                    continue

                # Store it if better than previous minimum(maximum).
                if max_acq is None or -res.fun[0] > max_acq:
                    x_max = res.x
                    max_acq = -res.fun[0]
                    self.logger.debug(
                        'use a result from minimize whose max_acq={}'.format(
                            max_acq))
        elif method == 'direct':
            # NOTE: `logfile` is assumed to be defined in the enclosing module
            # (path handed to DIRECT for its log output)
            x, _, _ = solve(obj_DIRECT,
                            self.bounds[0, :],
                            self.bounds[1, :],
                            maxf=1000,
                            logfilename=logfile)
            x = minimize(obj_LBFGS,
                         x,
                         bounds=self.reformat_bounds(self.bounds),
                         method='L-BFGS-B').x
            x_max = x
        else:
            raise NotImplementedError
        x_max = x_max.reshape((1, -1))
        x_max[:, -self.zdim:] = self._roundfidelity(x_max[:, -self.zdim:])
        self.logger.debug(
            'end optimizing acq_f, with x_max={}, self.acq_f(x_max)={}'.format(
                x_max, self.acq_f(x_max)))
        return np.clip(x_max, self.bounds[0, :], self.bounds[1, :]).reshape(
            (1, -1))
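
`reformat_bounds` is referenced but not defined in this excerpt. Judging from the `np.random.uniform(self.bounds[0, :], self.bounds[1, :], ...)` calls, `self.bounds` is a `(2, d)` array with lower bounds in row 0 and upper bounds in row 1, so a plausible helper (an assumption, not the author's code) is the one-liner below.

def reformat_bounds(bounds):
    # Turn a (2, d) bounds array into the sequence of (low, high) pairs
    # that scipy.optimize.minimize expects for L-BFGS-B
    return list(zip(bounds[0, :], bounds[1, :]))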
Example #39
from DIRECT import solve
import numpy as np

trans = np.array([-1, 2, -4, 3])

def func(x, user_data):
    # Shifted quadratic with minimum 0 at x == trans
    x = x - trans  # avoid mutating DIRECT's trial point in place
    return np.dot(x, x), 0


if __name__ == '__main__':

    l = np.array([-10, -10, -10, -10], dtype=np.float64)
    u = np.array([10, 10, 10, 10], dtype=np.float64)

    x, fmin, ierror = solve(
                        func,
                        l,
                        u
                        )

    print('Optimal point:', x)
    print('Optimal value:', fmin)
    print('Exit status:', ierror)
Example #40
    def optimize_posterior_mean(self, n_iter=20, method='random'):
        self.logger.debug("optimizer for posterior mean: {}".format(method))
        self.logger.debug("n_iter={}".format(n_iter))

        def obj_LBFGS(x):
            x = np.reshape(x, (-1, self.xdim))
            mean, _ = self.gps.predict(x)
            if self.acq_name == 'ES' or self.acq_name == 'MES':
                mean = -mean
            return -mean

        def obj_DIRECT(x, u):
            x = np.reshape(x, (-1, self.xdim))
            mean, _ = self.gps.predict(x)
            if self.acq_name == 'ES' or self.acq_name == 'MES':
                mean = -mean
            return -mean, 0

        x_tries = np.random.uniform(self.bounds[0, :],
                                    self.bounds[1, :],
                                    size=(self.config['optninit'],
                                          self.bounds.shape[1]))
        if method == 'random':
            x_seeds = np.random.uniform(self.bounds[0, :],
                                        self.bounds[1, :],
                                        size=(n_iter, self.bounds.shape[1]))
            ys = -obj_LBFGS(x_tries)
            x_max = x_tries[ys.argmax()].reshape((1, -1))
            max_acq = ys.max()
            self.logger.debug('max_acq from random={}'.format(max_acq))
            for x_try in x_seeds:
                res = minimize(obj_LBFGS,
                               x_try.reshape(1, -1),
                               bounds=self.reformat_bounds(self.bounds),
                               method='L-BFGS-B')
                if not res.success:
                    self.logger.debug(
                        'minimize failed; trying another random initialisation from x_seeds'
                    )
                    continue

                if max_acq is None or -res.fun[0] > max_acq:
                    x_max = res.x
                    max_acq = -res.fun[0]
                    self.logger.debug(
                        'use a result from minimize whose max_acq={}'.format(
                            max_acq))

        elif method == 'direct':
            # NOTE: `logfile` is assumed to be defined in the enclosing module
            x, _, _ = solve(obj_DIRECT,
                            self.bounds[0, :],
                            self.bounds[1, :],
                            maxf=1000,
                            logfilename=logfile)
            x = minimize(obj_LBFGS,
                         x,
                         bounds=self.reformat_bounds(self.bounds),
                         method='L-BFGS-B').x
            x_max = x
        else:
            raise NotImplementedError
        x_max = x_max.reshape((1, -1))
        self.logger.debug(
            'end optimizing posterior mean, with x_max={}, posterior_mean(x_max)={}'
            .format(x_max, -obj_LBFGS(x_max)))
        return np.clip(x_max, self.bounds[0, :], self.bounds[1, :]).reshape(
            (1, -1))
Example #41
def optimization_objective(X, user_data):
    # Header reconstructed from the solve() call below: DIRECT invokes
    # f(x, user_data) and minimises, so we return the negated cost
    gp     = user_data['gp']
    mu     = user_data['mu'].squeeze()
    Sigma  = user_data['Sigma']

    X = to_row(X)
    
    _, gp_cov = gp.predict(X)
    pi_x = pi(X, mu, Sigma)
    cost = (pi_x**2 * gp_cov).squeeze()
    return -cost, 0
    
# Find new sample
user_data  = {"gp":gp, "mu":x0, "Sigma":p0}

x_star, _, _ = solve(optimization_objective, lower_const, upper_const, 
                             user_data=user_data, algmethod = 1, 
                             maxT = 3000, maxf = 10000)

x_star    = np.reshape(x_star, (1, X_DIM))                             
y_star, _ = gp.predict(x_star) 
X = np.vstack((X, x_star))
Y = np.vstack((Y, y_star))

#gp = GPy.models.GPRegression(X,Y,kernel)
#gp.optimize_restarts(num_restarts=16, verbose=False, parallel=False)

# Compute integral
cov_Q = np.eye(Y_DIM)
N = len(X)
# Fitted GP parameters      
w_0 = gp.rbf.variance.tolist()[0]
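
This fragment begins mid-function; the `optimization_objective` header has been reconstructed above from the `solve` call, but `pi` and `to_row` are still defined elsewhere. Reading `pi(X, mu, Sigma)` as the Gaussian prior density, which makes the cost the classic uncertainty-sampling criterion `pi(x)^2 * Var[f(x)]`, a hedged sketch of the two helpers is:

import numpy as np
from scipy.stats import multivariate_normal

def pi(X, mu, Sigma):
    # Assumed definition: Gaussian density N(X; mu, Sigma) at the row vector X
    return multivariate_normal.pdf(np.ravel(X), mean=np.ravel(mu), cov=Sigma)

def to_row(x):
    # Assumed helper: reshape any array into a (1, d) row vector
    return np.reshape(np.asarray(x), (1, -1))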
Example #42
    def gpo(self, sm, sys, thSys):

        #=====================================================================
        # Initialisation
        #=====================================================================

        # Set initial settings
        sm.calcGradientFlag = False
        sm.calcHessianFlag = False
        self.nPars = thSys.nParInference
        self.filePrefix = thSys.filePrefix
        runNextIter = True
        self.iter = 0

        # Check algorithm settings and set to default if needed
        setSettings(self, "gpo")

        # Make a grid to evaluate the EI on
        l = np.array(self.lowerBounds[0:thSys.nParInference], dtype=np.float64)
        u = np.array(self.upperBounds[0:thSys.nParInference], dtype=np.float64)

        # Allocate vectors
        AQ = np.zeros((self.maxIter + 1, 1))
        mumax = np.zeros((self.maxIter + 1))
        thp = np.zeros((self.maxIter + 1, self.nPars))
        obp = np.zeros((self.maxIter + 1, 1))
        thhat = np.zeros((self.maxIter, self.nPars))
        thhatHessian = np.zeros((self.maxIter, self.nPars, self.nPars))
        obmax = np.zeros((self.maxIter + 1, 1))
        hyperParams = np.zeros((self.maxIter, 3 + self.nPars))
        xhatf = np.zeros((self.maxIter + 1, sys.T))

        #=====================================================================
        # Pre-run using random sampling to estimate hyperparameters
        #=====================================================================

        # Pre allocate vectors
        thPre = np.zeros((self.preIter, self.nPars))
        obPre = np.zeros((self.preIter, 1))

        #=====================================================================
        # Main loop
        #=====================================================================

        # Pre-compute hypercube points if required
        if (self.preSamplingMethod == "latinHyperCube"):
            lhd = lhs(self.nPars, samples=self.preIter)

        for kk in range(0, self.preIter):

            # Sample parameters using uniform sampling, a Sobol sequence or a Latin hypercube
            if (self.preSamplingMethod == "latinHyperCube"):
                # Sample parameters using a Latin hypercube over the parameter
                # bounds
                thPre[kk, :] = l + (u - l) * lhd[kk, :]
            elif (self.preSamplingMethod == "sobol"):
                # Sample parameters using a Sobol sequence over the parameter
                # bounds
                thPre[kk, :] = l + (u - l) * i4_sobol(self.nPars, 100 + kk)[0]
            else:
                # Draw parameters uniform over the parameter bounds
                thPre[kk, :] = l + (u - l) * np.random.random(self.nPars)

            # Evaluate the objective function in the parameters
            thSys.storeParameters(thPre[kk, :], sys)
            obPre[kk], tmp1 = self.evaluateObjectiveFunction(sm, sys, thSys)

            # Transform and save the parameters
            thSys.transform()
            thPre[kk, :] = thSys.returnParameters()[0:thSys.nParInference]

            # Write out progress if requested
            if (self.verbose):
                print("gpo: Pre-iteration: " + str(kk) + " of " + str(self.preIter) + " completed, sampled " +
                      str(np.round(thPre[kk, :], 3)) + " with " + str(np.round(obPre[kk], 2)) + ".")

        #=====================================================================
        # Fit the GP regression
        #=====================================================================

        # Remove nan values for the objective function
        idxNotNaN = ~np.isnan(obPre)
        thPre = thPre[(idxNotNaN).any(axis=1)]
        obPre = obPre[(idxNotNaN).any(axis=1)]

        # Specify the kernel ( Matern52 with ARD plus bias kernel to compensate
        # for non-zero mean )
        kernel = GPy.kern.Matern52(
            input_dim=self.nPars, ARD=True) + GPy.kern.Bias(input_dim=self.nPars)

        # Normalize the objective function evaluations
        ynorm = (obPre - np.mean(obPre)) / np.sqrt(np.var(obPre))

        # Create the model object
        m = GPy.models.GPRegression(thPre, ynorm, kernel, normalizer=False)

        #=====================================================================
        # Update hyperparameters
        #=====================================================================

        # Set constraints on hyperparameters
        m.Gaussian_noise.variance.constrain_bounded(0.01, 10.0)
        m.kern.Mat52.lengthscale.constrain_bounded(0.01, 10.0)
        m.kern.Mat52.variance.constrain_bounded(0.01, 25.0)

        # Run empirical Bayes to estimate the hyperparameters
        m.optimize('bfgs', max_iters=200)
        m.optimize_restarts(num_restarts=10, robust=True)
        self.GaussianNoiseVariance = np.array(
            m.Gaussian_noise.variance, copy=True)

        #=====================================================================
        # Write to output
        #=====================================================================

        self.thPre = thPre
        self.obPre = obPre
        self.m = m

        #=====================================================================
        # Main loop
        #=====================================================================

        # Save the initial parameters
        thSys.storeParameters(self.initPar, sys)
        thp[self.iter, :] = thSys.returnParameters()
        thSys.transform()

        while (runNextIter):

            # Store the parameter
            thSys.storeParameters(thp[self.iter, :], sys)
            thSys.transform()

            #------------------------------------------------------------------
            # Evaluate the objective function
            #------------------------------------------------------------------
            obp[self.iter], xhatf[self.iter,
                                  :] = self.evaluateObjectiveFunction(sm, sys, thSys)

            # Collect the sampled data (dropping non-finite objective values);
            # the boolean mask has length self.iter, so slice before indexing
            idxNotNaN = ~np.isnan(obp[range(self.iter), :])
            x = np.vstack((thPre, thp[range(self.iter), :][idxNotNaN.any(axis=1)]))
            y = np.vstack((obPre, obp[range(self.iter), :][idxNotNaN.any(axis=1)]))

            #------------------------------------------------------------------
            # Fit the GP to the sampled data
            #------------------------------------------------------------------
            ynorm = (y - np.mean(y)) / np.sqrt(np.var(y))
            self.ynormMean = np.mean(y)
            self.ynormVar = np.var(y)

            m = GPy.models.GPRegression(x, ynorm, kernel, normalizer=False)

            #------------------------------------------------------------------
            # Re-estimate the hyperparameters
            #------------------------------------------------------------------
            if (np.remainder(self.iter + 1, self.EstimateHyperparametersInterval) == 0):

                # Set constraints on hyperparameters
                m.Gaussian_noise.variance.constrain_bounded(0.01, 10.0)
                m.kern.Mat52.lengthscale.constrain_bounded(0.01, 10.0)
                m.kern.Mat52.variance.constrain_bounded(0.01, 25.0)

                # Run empirical Bayes to estimate the hyperparameters
                m.optimize('bfgs', max_iters=200)
                m.optimize_restarts(num_restarts=10, robust=True)

                # Save the current noise variance
                self.GaussianNoiseVariance = np.array(
                    m.Gaussian_noise.variance, copy=True)

            else:

                # Overload current noise estimate (sets to 1.0 every time we
                # add data otherwise)
                m.Gaussian_noise.variance = self.GaussianNoiseVariance

            # Save all the hyperparameters
            hyperParams[self.iter, 0] = np.array(
                m.Gaussian_noise.variance, copy=True)
            hyperParams[self.iter, 1] = np.array(
                m.kern.bias.variance, copy=True)
            hyperParams[self.iter, 2] = np.array(
                m.kern.Mat52.variance, copy=True)
            hyperParams[self.iter, range(
                3, 3 + self.nPars)] = np.array(m.kern.Mat52.lengthscale, copy=True)

            #------------------------------------------------------------------
            # Find the maximum expected value of the GP over the sampled parameters
            #------------------------------------------------------------------
            Mup, ys2 = m.predict(x)
            mumax[self.iter] = np.max(Mup)

            #------------------------------------------------------------------
            # Compute the next point in which to sample the posterior
            #------------------------------------------------------------------

            # Optimize the AQ function
            aqThMax, aqMax, ierror = solve(self.AQfunction, l, u, user_data=(
                m, mumax[self.iter], self.epsilon), maxf=1000, maxT=1000)

            # Jitter the parameter estimates
            if (self.jitterParameters == True):
                flag = 0.0

                while (flag == 0.0):
                    z = np.random.multivariate_normal(np.zeros(self.nPars), self.jitteringCovariance[
                                                      range(self.nPars), :][:, range(self.nPars)])
                    flag = self.checkProposedParameters(aqThMax + z)

                thSys.storeParameters(aqThMax + z, sys)
                aqThMax += z

            # Set the new point and save the estimate of the AQ
            thp[self.iter + 1, :] = aqThMax
            AQ[self.iter + 1] = -aqMax

            # Update counter
            self.iter += 1

            #------------------------------------------------------------------
            # Check exit conditions
            #------------------------------------------------------------------

            # AQ function criteria
            if (AQ[self.iter] < self.tolLevel):
                print("GPO: reaches tolLevel, so exiting...")
                runNextIter = False

            # Max iteration criteria
            if (self.iter == self.maxIter):
                print("GPO: reaches maxIter, so exiting...")
                runNextIter = False

            #------------------------------------------------------------------
            # Estimate the current parameters by maximizing the GP
            #------------------------------------------------------------------
            if ((self.EstimateThHatEveryIteration == True) | (runNextIter == False)):
                thhatCurrent, obmaxCurrent, ierror = solve(
                    self.MUeval, l, u, user_data=m, algmethod=1, maxf=1000, maxT=1000)

                thhat[self.iter - 1, :] = thhatCurrent
                obmax[self.iter - 1, :] = obmaxCurrent

                print((thhatCurrent, obmaxCurrent))

                if (self.EstimateHessianEveryIteration == True):
                    self.estimateHessian(thhatCurrent)
                    thhatHessian[self.iter - 1, :, :] = self.invHessianEstimate

            #------------------------------------------------------------------
            # Print output to console
            #------------------------------------------------------------------
            if (self.verbose):
                if (self.EstimateThHatEveryIteration == True):
                    parm = ["%.4f" % v for v in thhat[self.iter - 1, :]]
                    print(
                        "##############################################################################################")
                    print("Iteration: " + str(self.iter) + " with current parameters: " +
                          str(parm) + " and AQ: " + str(np.round(AQ[self.iter], 2)))
                    print(
                        "##############################################################################################")
                else:
                    parm = ["%.4f" % v for v in thp[self.iter - 1, :]]
                    print(
                        "##############################################################################################")
                    print("Iteration: " + str(self.iter) + " sampled objective function at parameters: " +
                          str(parm) + " with value: " + str(np.round(obp[self.iter - 1], 2)))
                    print(
                        "##############################################################################################")

        #=====================================================================
        # Generate output
        #=====================================================================
        tmp = range(self.iter - 1)
        self.ob = obmax[tmp]
        self.th = thhat[tmp, :]
        self.thhat = thhat[self.iter - 1, :]
        self.thHessian = thhatHessian
        self.thhatHessian = thhatHessian[self.iter - 1, :, :]
        self.aq = AQ[range(self.iter)]
        self.obp = obp[tmp]
        self.thp = thp[range(self.iter), :]
        self.m = m
        self.x = x
        self.y = y
        self.xhatf = xhatf
        self.ynorm = ynorm
        self.hp = hyperParams
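
`self.MUeval` is passed to `solve` with `user_data=m` but is not shown. Since `m` is a GPy regression model whose `predict` returns the posterior mean and variance, a minimal stand-in consistent with the call above would be the following sketch (an assumption, not the author's code).

import numpy as np

def MUeval(x, m):
    # Posterior mean of the GP at x, negated because DIRECT minimises
    # while we want the maximiser of the surrogate surface
    Mup, ys2 = m.predict(np.atleast_2d(x))
    return -float(Mup), 0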
Example #43
    def integrate(self, m, P, fun, **kwargs):
        ''' Bayesian quadrature of fun against the Gaussian N(m, P).
            Input:
                m - column vector (prior mean)
                P - matrix (prior covariance)
            Output:
                mu_, Sigma_, CC_ - integral mean, covariance and cross-covariance
            Conventions:
                X, Y - matrices of row vectors
                z - column vector
                m, x, mu - row vectors
        '''
        
        # Initial sample and fitted GP:
        m = to_row(m)
        X = m
        Y = np.apply_along_axis(fun, 1, X, **kwargs)
        gp = self.gp_fit(X,Y)
        
        # Optimization constraints:
        N_SIGMA = 3
        P_diag = np.sqrt(np.diag(P))
        lower_const = (m - N_SIGMA*P_diag)[0].tolist()
        upper_const = (m + N_SIGMA*P_diag)[0].tolist()
        
        # Perform sampling
        for i in range(0, self.N_SAMPLES):
            # Set extra params to pass to optimizer
            user_data  = {"gp":gp, "m":m, "P":P}
            x_star, _, _ = solve(self.optimization_objective, lower_const, upper_const, 
                             user_data=user_data, algmethod = 1, 
                             maxT = self.opt_par["MAX_T"], 
                             maxf = self.opt_par["MAX_F"])

            x_star = to_row(x_star)
            X      = np.vstack((X, x_star))
            # NOTE: this re-evaluates fun on every row of X at each iteration
            Y      = np.apply_along_axis(fun, 1, X, **kwargs)
            gp     = self.gp_fit(X, Y)
            
        # Reoptimize the GP on deduplicated samples:
        # TODO: remove duplicate rows (unique_rows keeps one copy of each):
        X  = unique_rows(X)
        Y  = np.apply_along_axis(fun, 1, X, **kwargs)
        gp = self.gp_fit(X, Y)   

        # Compute integral
        # Fitted GP parameters      
        w_0 = gp.rbf.variance.tolist()[0]
        w_d = gp.rbf.lengthscale.tolist()
        # Prior parameters
        A = np.diag(w_d)
        I = np.eye(self.X_DIM)     
        
        # Compute weights
        z = [self.compute_z(a, A, m, P, I, w_0) for a in X]
        z = to_column(np.array(z))
        K = gp.kern.K(X)
        K = (K.T + K) / 2.0  # enforce symmetry
        W = (mo.mrdivide(z.T, K).squeeze()).tolist()
        
        # Compute mean, covariance and cross-cov
        mu_ = (z.T).dot( mo.mldivide(K, Y) )
        mu_ = to_row(mu_)
        
        Sigma_ = CC_ = None     

        for i in range(0,len(z)):
            YY_i   = ( to_column(Y[i]-mu_) ).dot( to_row(Y[i]-mu_) )
            Sigma_ = W[i] * YY_i if i == 0 else Sigma_ + W[i] * YY_i
            
            XY_i = ( to_column(X[i]-m) ).dot( to_row(Y[i]-mu_) )
            CC_  = W[i] * XY_i if i == 0 else CC_ + W[i] * XY_i
        
        mu_    = to_column(mu_)
        Sigma_ = symetrize_cov(Sigma_)
        
        # Return results
        return(mu_, Sigma_, CC_)
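
`compute_z`, `unique_rows`, `mo.mrdivide`/`mo.mldivide` and `symetrize_cov` are defined outside this excerpt. For reference, the kernel-prior integral z(a) has a well-known closed form for an RBF kernel under a Gaussian prior (Bayesian Monte Carlo, Rasmussen and Ghahramani, 2003); a hedged sketch, assuming `A` carries the squared lengthscales of the fitted kernel, is:

import numpy as np

def compute_z(self, a, A, m, P, I, w_0):
    # z(a) = int k(x, a) N(x; m, P) dx
    # for k(x, x') = w_0 * exp(-0.5 * (x - x')' A^{-1} (x - x')):
    # z(a) = w_0 * |A^{-1} P + I|^{-1/2}
    #            * exp(-0.5 * (a - m) (A + P)^{-1} (a - m)')
    d = np.atleast_2d(a - m)
    det_term = np.linalg.det(np.linalg.solve(A, P) + I)
    quad = d.dot(np.linalg.solve(A + P, d.T))
    return float(w_0 / np.sqrt(det_term) * np.exp(-0.5 * quad))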