Example #1
    def test_induced_sampling(self):
        """
        An integration test for the whole routine
        """
        dimension = 3
        parameters = [Parameter(3, "Uniform", upper=1, lower=-1)]*dimension
        basis = Basis("total-order", [3]*dimension)

        induced_sampling = Induced(parameters, basis)

        quadrature_points = induced_sampling.get_points()
        assert quadrature_points.shape == (induced_sampling.samples_number, 3)
Example #2
 def test_generate_sampling_class(self):
     """
      Test that the Sampling interface constructs an InducedSampling generator object.
     """
     parameters = [Parameter(1, "gaussian")] * 3
     basis = Basis("total order")
     generator_class = Sampling(parameters, basis,
                                ('induced-sampling', {
                                    "sampling-ratio": 2,
                                    "subsampling-optimisation": 'qr'
                                }))
     assert generator_class.sampling_class.__class__ == InducedSampling
Example #3
 def _calculate_subspace(self, S, f):
     parameters = [
         Parameter(distribution='uniform',
                   lower=np.min(S[:, i]),
                   upper=np.max(S[:, i]),
                   order=1) for i in range(0, self.n)
     ]
     self.poly = Poly(parameters, basis=Basis('total-order'), method='least-squares', \
                  sampling_args={'sample-points': S, 'sample-outputs': f})
     self.poly.set_model()
     self.Subs = Subspaces(full_space_poly=self.poly,
                           method='active-subspace',
                           subspace_dimension=self.d)
     if self.subspace_method == 'variable-projection':
         U0 = self.Subs.get_subspace()[:, :self.d]
         self.Subs = Subspaces(method='variable-projection', sample_points=S, sample_outputs=f, \
                 subspace_init=U0, subspace_dimension=self.d, polynomial_degree=2, max_iter=300)
         self.U = self.Subs.get_subspace()[:, :self.d]
     elif self.subspace_method == 'active-subspaces':
         U0 = self.Subs.get_subspace()[:, 1].reshape(-1, 1)
         U1 = null_space(U0.T)
         self.U = U0
         for i in range(self.d - 1):
             R = []
             for j in range(U1.shape[1]):
                 U = np.hstack((self.U, U1[:, j].reshape(-1, 1)))
                 Y = np.dot(S, U)
                 myParameters = [Parameter(distribution='uniform', lower=np.min(Y[:,k]), upper=np.max(Y[:,k]), \
                         order=2) for k in range(Y.shape[1])]
                 myBasis = Basis('total-order')
                 poly = Poly(myParameters, myBasis, method='least-squares', \
                         sampling_args={'sample-points':Y, 'sample-outputs':f})
                 poly.set_model()
                 f_eval = poly.get_polyfit(Y)
                 _, _, r, _, _ = linregress(f_eval.flatten(), f.flatten())
                 R.append(r**2)
             index = np.argmax(R)
             self.U = np.hstack((self.U, U1[:, index].reshape(-1, 1)))
             U1 = np.delete(U1, index, 1)
Example #4
                        def _fit_poly(X, y):

#                                try:

                                N, d = X.shape
                                myParameters = []

                                for dimension in range(d):
                                        values = X[:,dimension]
                                        values_min = np.amin(values)
                                        values_max = np.amax(values)

                                        if (values_min - values_max) ** 2 < 0.01:
                                            values_min -= 0.01
                                            values_max += 0.01
                                            myParameters.append(Parameter(distribution='Uniform', lower=values_min, upper=values_max, order=self.order))
                                        else:
                                            if self.distribution == 'uniform':
                                                myParameters.append(Parameter(distribution='Uniform', lower=values_min, upper=values_max, order=self.order))
                                            elif self.distribution == 'data':
                                                input_dist = Weight(values, support=[values_min, values_max], pdf=False)
                                                myParameters.append(Parameter(distribution='data',weight_function=input_dist,order=self.order))

                                if self.basis == "hyperbolic-basis":
                                        myBasis = Basis(self.basis, orders=[self.order for _ in range(d)], q=0.5)
                                else:
                                        myBasis = Basis(self.basis, orders=[self.order for _ in range(d)])

                                container["index_node_global"] += 1
                                poly = Poly(myParameters, myBasis, method=self.poly_method, sampling_args={'sample-points':X, 'sample-outputs':y}, solver_args=self.poly_solver_args)
                                poly.set_model()

                                mse = np.linalg.norm(y - poly.get_polyfit(X).reshape(-1)) ** 2 / N
#                                except Exception as e:
#                                        print("Warning fitting of Poly failed:", e)
#                                        print(d, values_min, values_max)
#                                        mse, poly = np.inf, None

                                return mse, poly
Example #5
    def test_sampling(self):
        d = 4
        order = 5
        param = Parameter(distribution='uniform',
                          order=order,
                          lower=-1.0, upper=1.0)
        myparameters = [param for _ in range(d)]
        mybasis = Basis('total-order')
        mypoly = Poly(myparameters, mybasis,
                      method='least-squares',
                      sampling_args={'mesh': 'induced',
                                     'subsampling-algorithm': 'qr',
                                     'sampling-ratio': 1})

        assert mypoly._quadrature_points.shape == (mypoly.basis.cardinality, d)
Example #6
def vandermonde(eta, p):
    _, n = eta.shape
    listing = []
    for i in range(0, n):
        listing.append(p)
    Object = Basis('total-order', listing)
    #Establish n Parameter objects
    params = []
    P = Parameter(order=p, lower=-1, upper=1, distribution='uniform')
    for i in range(0, n):
        params.append(P)
    #Use the params list to establish the Poly object
    Polybasis = Poly(params, Object, method='least-squares')
    V = Polybasis.get_poly(eta)
    V = V.T
    return V, Polybasis
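A minimal usage sketch for the vandermonde helper above. It assumes the same environment as the rest of these examples (numpy plus the equadratures Parameter, Basis and Poly classes in scope); the sample array and order are arbitrary stand-ins:

import numpy as np
from equadratures import Parameter, Basis, Poly

eta = np.random.uniform(-1.0, 1.0, (50, 2))   # 50 sample points in 2 dimensions
V, poly_basis = vandermonde(eta, 3)           # Vandermonde-type matrix and the underlying Poly
print(V.shape, poly_basis.basis.cardinality)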
Example #7
    def _set_sparsegrid_quadrature_rule(self, orders=None):
        """
        Generates a sparse grid quadrature rule based on the parameters in Poly.

        :param Poly self:
            An instance of the Poly class.
        :param list orders:
            A list of the highest polynomial orders along each dimension.
        :return:
            **None**. The rule is stored on the instance: **self.points** is a numpy.ndarray of quadrature points with shape
            (number_of_samples, dimension) and **self.weights** is a numpy.ndarray of the corresponding quadrature weights.
        """
        sparse_indices, sparse_factors, not_used = self.basis.get_basis()
        rows = len(sparse_indices)
        cols = len(sparse_indices[0])

        # For storage we use dictionaries
        points_store = {}
        weights_store = {}
        indices = np.zeros((rows))
        self.tensor_product_list = []
        for i in range(0,rows):
            orders = sparse_indices[i,:]
            myBasis = Basis('tensor-grid')
            myTensor = Tensorgrid(parameters=self.parameters, basis=myBasis, orders=orders.astype(int) )
            self.tensor_product_list.append(myTensor)
            pts = myTensor.points
            wts = myTensor.weights * sparse_factors[i]
            points_store[i] = pts
            weights_store[i] = wts
            indices[i] = myTensor.basis.cardinality
            del myTensor, myBasis
        sum_indices = int(np.sum(indices))
        points_saved = np.zeros((sum_indices, self.basis.dimensions))
        weights_saved = np.zeros((sum_indices))
        counter = int(0)
        for i in range(0,rows):
            for j in range(0, int(indices[i])):
                for d in range(0, self.basis.dimensions):
                    points_saved[counter,d] = points_store[i][j, d]
                weights_saved[counter] = weights_store[i][j]
                counter = counter + 1
        self.points , indices = np.unique(points_saved, axis=0, return_index=True)
        self.weights = weights_saved[indices]
        self.sparse_indices = sparse_indices
        self.sparse_weights = sparse_factors
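For reference, the stacking and de-duplication at the end of the routine above can be written compactly in plain numpy. This is an illustrative sketch of that combination step only, not the library's internals; grids and coefficients are stand-ins for the tensor-grid points/weights and the sparse factors computed above:

import numpy as np

def combine_tensor_grids(grids, coefficients):
    # grids: list of (points, weights) tuples, one per tensor grid
    # coefficients: the corresponding sparse-grid (Smolyak) coefficients
    pts = np.vstack([p for p, _ in grids])
    wts = np.concatenate([np.asarray(w) * c for (_, w), c in zip(grids, coefficients)])
    unique_pts, first_idx = np.unique(pts, axis=0, return_index=True)
    return unique_pts, wts[first_idx]   # keep one weight per repeated point, as above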
Example #8
 def __init__(self,
              method,
              full_space_poly=None,
              sample_points=None,
              sample_outputs=None,
              polynomial_degree=2,
              subspace_dimension=2,
              bootstrap=False,
              subspace_init=None,
              max_iter=1000,
              tol=None):
     self.full_space_poly = full_space_poly
     self.sample_points = sample_points
     self.Y = None  # for the zonotope vertices
     if self.sample_points is not None:
         self.sample_points = standardise(sample_points)
     self.sample_outputs = sample_outputs
     self.method = method
     self.subspace_dimension = subspace_dimension
     self.polynomial_degree = polynomial_degree
     self.bootstrap = bootstrap
     if self.method.lower() == 'active-subspace' or self.method.lower(
     ) == 'active-subspaces':
          self.method = 'active-subspace'
         if self.full_space_poly is None:
             N, d = self.sample_points.shape
             param = Parameter(distribution='uniform',
                               lower=-1,
                               upper=1.,
                               order=self.polynomial_degree)
             myparameters = [param for _ in range(d)]
             mybasis = Basis("total-order")
             mypoly = Poly(myparameters, mybasis, method='least-squares', sampling_args={'sample-points':self.sample_points, \
                                                                 'sample-outputs':self.sample_outputs})
             mypoly.set_model()
             self.full_space_poly = mypoly
         self.sample_points = standardise(self.full_space_poly.get_points())
         self.sample_outputs = self.full_space_poly.get_model_evaluations()
         self._get_active_subspace()
     elif self.method == 'variable-projection':
         self._get_variable_projection(None, None, tol, max_iter,
                                       subspace_init, False)
Example #9
    def test_samples(self):
        """
        Test the sample values returned by the induced sampling routine against a mocked sampling function.
        """
        dimension = 3
        sampling_ratio = 3
        parameters = [Parameter(1, "gaussian")] * dimension
        basis = Basis("total order", [5] * dimension)

        induced_sampling = InducedSampling(parameters, basis, sampling_ratio,
                                           "qr")

        # Mock additive mixture sampling
        def func(array_):
            return np.array([1] * dimension, float)

        induced_sampling.additive_mixture_sampling = func
        quadrature_points = induced_sampling.samples()
        true_array = np.ones((dimension * sampling_ratio, dimension))
        assert_array_equal(quadrature_points, true_array)
Example #10
 def _build_model(self, S, f, del_k):
     """
     Constructs quadratic model for ``trust-region`` method
     """
     myParameters = [
         Parameter(distribution='uniform',
                   lower=S[0, i] - del_k,
                   upper=S[0, i] + del_k,
                   order=2) for i in range(S.shape[1])
     ]
     myBasis = Basis('total-order')
     my_poly = Poly(myParameters,
                    myBasis,
                    method='compressive-sensing',
                    sampling_args={
                        'sample-points': S,
                        'sample-outputs': f
                    })
     my_poly.set_model()
     return my_poly
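A short usage note: the quadratic surrogate returned above is an ordinary Poly, so a trust-region step can evaluate it at candidate points with get_polyfit. A hedged, self-contained sketch with stand-in data (the point set S is centred on a current iterate s_old with radius del_k, mirroring the construction above):

import numpy as np
from equadratures import Parameter, Basis, Poly

s_old, del_k = np.zeros(2), 0.5
S = s_old + np.random.uniform(-del_k, del_k, (12, 2))     # sample set inside the trust region
f = np.sum(S**2, axis=1).reshape(-1, 1)                   # stand-in objective values
params = [Parameter(distribution='uniform', lower=s_old[i] - del_k,
                    upper=s_old[i] + del_k, order=2) for i in range(S.shape[1])]
model = Poly(params, Basis('total-order'), method='compressive-sensing',
             sampling_args={'sample-points': S, 'sample-outputs': f})
model.set_model()
print(model.get_polyfit(s_old.reshape(1, -1)))            # surrogate prediction at the iterate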
Example #11
    def _set_statistics(self):
        """
        Private method that is used within the statistics routines.

        """
        if self.statistics_object is None:
            if self.method != 'numerical-integration' and self.dimensions <= 6 and self.highest_order <= MAXIMUM_ORDER_FOR_STATS:
                quad = Quadrature(parameters=self.parameters, basis=Basis('tensor-grid', orders= np.array(self.parameters_order) + 1), \
                    mesh='tensor-grid', points=None)
                quad_pts, quad_wts = quad.get_points_and_weights()
                poly_vandermonde_matrix = self.get_poly(quad_pts)
            else:
                poly_vandermonde_matrix = self.get_poly(self._quadrature_points)
                quad_pts, quad_wts = self.get_points_and_weights()

            if self.highest_order <= MAXIMUM_ORDER_FOR_STATS:
                self.statistics_object = Statistics(self.parameters, self.basis,  self.coefficients,  quad_pts, \
                        quad_wts, poly_vandermonde_matrix, max_sobol_order=self.highest_order)
            else:
                self.statistics_object = Statistics(self.parameters, self.basis,  self.coefficients,  quad_pts, \
                        quad_wts, poly_vandermonde_matrix, max_sobol_order=MAXIMUM_ORDER_FOR_STATS)
Example #12
def getPseudospectralCoefficients(self, function, override_orders=None):
    if override_orders is None:
        pts, wts = super(Polyint, self).getTensorQuadratureRule()
        tensor_elements = self.basis.elements
        P = super(Polyint, self).getPolynomial(pts)
    else:
        pts, wts = super(Polyint, self).getTensorQuadratureRule(override_orders)
        tensor_basis = Basis('Tensor grid', override_orders)
        tensor_elements = tensor_basis.elements
        P = super(Polyint, self).getPolynomial(pts, tensor_elements)

    m = len(wts)
    W = np.mat( np.diag(np.sqrt(wts)))
    A = np.mat(W * P.T)
    if callable(function):
        y = evalfunction(points=pts, function=function)
    else:
        y = function
    b = np.dot( W  ,  np.reshape(y, (m,1)) )
    coefficients = np.dot(A.T , b)  
    return coefficients, tensor_elements, pts, wts
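The linear algebra above is a discrete (pseudospectral) projection: with quadrature weights wts, the basis evaluation matrix P and model evaluations y, the coefficients are A^T (W y) where A = W P^T and W = diag(sqrt(wts)). A small numpy sketch of just that step, with stand-in arrays rather than the library's internals:

import numpy as np

def pseudospectral_coefficients(P, wts, y):
    # P:   (cardinality, n_points) basis evaluations at the quadrature points
    # wts: (n_points,) quadrature weights
    # y:   (n_points,) model evaluations at the quadrature points
    W = np.diag(np.sqrt(wts))
    A = W @ P.T                       # weighted Vandermonde-type matrix
    b = W @ y.reshape(-1, 1)          # weighted model evaluations
    return A.T @ b                    # (cardinality, 1) coefficient vector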
Example #13
    def _set_statistics(self):
        """
        Private method that is used within the statistics routines.

        """
        if self.statistics_object is None:
            if hasattr(self, 'inv_R_Psi'):
                # quad_pts, quad_wts = self.quadrature.get_points_and_weights()
                N_quad = 20000
                quad_pts = self.corr.get_correlated_samples(N=N_quad)
                quad_wts = 1.0 / N_quad * np.ones(N_quad)
                poly_vandermonde_matrix = self.get_poly(quad_pts)
            elif self.method != 'numerical-integration' and self.dimensions <= 6 and self.highest_order <= MAXIMUM_ORDER_FOR_STATS:
                quad = Quadrature(parameters=self.parameters, basis=Basis('tensor-grid', orders= np.array(self.parameters_order) + 1), \
                    mesh='tensor-grid', points=None)
                quad_pts, quad_wts = quad.get_points_and_weights()
                poly_vandermonde_matrix = self.get_poly(quad_pts)
            elif self.mesh == 'monte-carlo':
                quad = Quadrature(parameters=self.parameters,
                                  basis=self.basis,
                                  mesh=self.mesh,
                                  points=None,
                                  oversampling=10.0)
                quad_pts, quad_wts = quad.get_points_and_weights()
                N_quad = len(quad_wts)
                quad_wts = 1.0 / N_quad * np.ones(N_quad)
                poly_vandermonde_matrix = self.get_poly(quad_pts)
            else:
                poly_vandermonde_matrix = self.get_poly(
                    self._quadrature_points)
                quad_pts, quad_wts = self.get_points_and_weights()

            if self.highest_order <= MAXIMUM_ORDER_FOR_STATS and (
                    self.basis.basis_type.lower() == 'total-order'
                    or self.basis.basis_type.lower() == 'hyperbolic-basis'):
                self.statistics_object = Statistics(self.parameters, self.basis,  self.coefficients,  quad_pts, \
                        quad_wts, poly_vandermonde_matrix, max_sobol_order=self.highest_order)
            else:
                self.statistics_object = Statistics(self.parameters, self.basis,  self.coefficients,  quad_pts, \
                        quad_wts, poly_vandermonde_matrix, max_sobol_order=MAXIMUM_ORDER_FOR_STATS)
Example #14
 def test_induced_jacobi_evaluation(self):
     dimension = 3
     parameters = [Parameter(1, "Uniform", upper=1, lower=-1)] * dimension
     basis = Basis("total-order")
     induced_sampling = Induced(parameters, basis)
     parameter = parameters[0]
     parameter.order = 3
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 0, parameter)
     np.testing.assert_allclose(cdf_value, 0.5, atol=0.00001)
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 1, parameter)
     assert cdf_value == 1
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, -1, parameter)
     assert cdf_value == 0
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 0.6, parameter)
     np.testing.assert_allclose(cdf_value, 0.7462, atol=0.00005)
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 0.999, parameter)
     np.testing.assert_allclose(cdf_value, 0.99652, atol=0.000005)
Example #15
    def get_subspace_polynomial(self):
        """
        Returns a polynomial defined over the dimension reducing subspace.

        :param Subspaces self:
            An instance of the Subspaces object.

        :return:
            **subspacepoly**: A Poly object that defines a polynomial over the subspace. The parameters are assumed to be
            uniformly distributed, with the bounds of each parameter set by the minimum and maximum values of the projected samples.
        """
        active_subspace = self._subspace[:, 0:self.subspace_dimension]
        projected_points = np.dot(self.sample_points, active_subspace)
        myparameters = []
        for i in range(0, self.subspace_dimension):
            param = Parameter(distribution='uniform', lower=np.min(projected_points[:,i]), upper=np.max(projected_points[:,i]), \
                order=self.polynomial_degree)
            myparameters.append(param)
        mybasis = Basis("total-order")
        subspacepoly = Poly(myparameters, mybasis, method='least-squares', sampling_args={'sample-points':projected_points, \
                                                                    'sample-outputs':self.sample_outputs})
        subspacepoly.set_model()
        return subspacepoly
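A minimal usage sketch for this method, assuming equadratures' Subspaces class with the constructor keywords shown in Example #8; the test function and data sizes are arbitrary:

import numpy as np
from equadratures import Subspaces

X = np.random.uniform(-1.0, 1.0, (300, 7))
y = np.exp(X[:, 0] + 0.5 * X[:, 1]).reshape(-1, 1)    # a ridge-like test function
subs = Subspaces(method='active-subspace', sample_points=X, sample_outputs=y,
                 polynomial_degree=2, subspace_dimension=2)
subspace_poly = subs.get_subspace_polynomial()        # Poly over the reduced coordinates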
Example #16
		def _fit_poly(X, y):

			N, d = X.shape
			myParameters = []

			for dimension in range(d):
				values = [X[i,dimension] for i in range(N)]
				values_min = min(values)
				values_max = max(values)

				if (values_min - values_max) ** 2 < 0.01:
					myParameters.append(Parameter(distribution='Uniform', lower=values_min-0.01, upper=values_max+0.01, order=self.order))
				else: 
					myParameters.append(Parameter(distribution='Uniform', lower=values_min, upper=values_max, order=self.order))
			myBasis = Basis('total-order')
			
			y = np.reshape(y, (y.shape[0], 1))

			poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X, 'sample-outputs':y})

			poly.set_model()

			mse = ((y-poly.get_polyfit(X))**2).mean()
			return mse, poly
Example #17
    def __init__(self, D=None, R=None):
        if D is None:
            raise ValueError('Distributions must be given')
        else:
            self.D = D

        if R is None:
            raise ValueError('Correlation matrix must be specified')
        else:
            self.R = R

        self.std = Parameter(order=5, distribution='normal',shape_parameter_A = 0.0, shape_parameter_B = 1.0)
        #
        #    R0 = fictive matrix of correlated normal intermediate variables
        #
        #    1) Check the type of correlated marginals
        #    2) Use Effective Quadrature for solving Legendre
        #    3) Calculate the fictive matrix

        inf_lim = -8.0
        sup_lim = - inf_lim
        p1 = Parameter(distribution = 'uniform', lower = inf_lim, upper = sup_lim, order = 31)
        myBasis = Basis('Tensor grid')
        Pols = Polyint([p1, p1], myBasis)
        p = Pols.quadraturePoints
        w = Pols.quadratureWeights * (sup_lim - inf_lim)**2

        p1 = p[:,0]
        p2 = p[:,1]

        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i+1, len(self.D), 1):
                if self.R[i,j] == 0:
                    R0[i,j] = 0.0
                else:
                  tp11 = -(np.array(self.D[i].getiCDF(self.std.getCDF(points=p1))) - self.D[i].mean ) / np.sqrt( self.D[i].variance )
                  tp22 = -(np.array(self.D[j].getiCDF(self.std.getCDF(points=p2))) -  self.D[j].mean)/np.sqrt( self.D[j].variance )

                  rho_ij = self.R[i,j]
                  bivariateNormalPDF = (1.0 / (2.0 * np.pi * np.sqrt(1.0-rho_ij**2)) * np.exp(-1.0/(2.0*(1.0 - rho_ij**2)) * (p1**2 - 2.0 * rho_ij * p1 * p2  + p2**2 )))
                  coefficientsIntegral = np.flipud(tp11*tp22 * w)

                  def check_difference(rho_ij):
                      bivariateNormalPDF = (1.0 / (2.0 * np.pi * np.sqrt(1.0-rho_ij**2)) * np.exp(-1.0/(2.0*(1.0 - rho_ij**2)) * (p1**2 - 2.0 * rho_ij * p1 * p2  + p2**2 )))
                      diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                      return diff - self.R[i,j]

                  if (self.D[i].name!='custom') or (self.D[j].name!='custom'):
                    rho = optimize.newton(check_difference, self.R[i,j], maxiter=50)
                  else:
                    res = optimize.least_squares(check_difference, self.R[i,j], bounds=(-0.999,0.999), ftol=1.e-03)
                    rho = res.x
                    print('A Custom Marginal is present')

                  R0[i,j] = rho
                  R0[j,i] = R0[i,j]

        self.A = np.linalg.cholesky(R0)
        print('The Cholesky decomposition of fictive matrix R0 is:')
        print(self.A)
        print('The fictive matrix is:')
        print(R0)
Example #18
    def _well_poised_LU(self, S, f, S_hat, f_hat):
        """
        Ensures the regression set is well-poised using the LU algorithm (proposed by Andrew Conn) for ``trust-region`` method
        """
        #       Poised constant of algorithm
        psi = 1.0
        #       Generate natural monomial basis
        Base = Basis('total-order', orders=np.tile([1], self.n))
        basis = Base.get_basis()[:, range(self.n - 1, -1, -1)]

        def natural_basis_function(x, basis):
            phi = np.zeros(basis.shape[0])
            for j in range(basis.shape[0]):
                phi[j] = 1.0
                for k in range(basis.shape[1]):
                    phi[j] *= (x[k]**basis[j, k]) / factorial(basis[j, k])
            return phi

        phi_function = lambda x: natural_basis_function(x, basis)
        #       Initialise U matrix of LU factorisation of M matrix (see Conn et al.)
        U = np.zeros((self.p, self.p))
        #       Initialise the first row of U to the e1 basis vector which corresponds to solution with all zeros
        U[0, 0] = 1.0
        #       Perform the LU factorisation algorithm for the rest of the points
        for k in range(1, self.p):
            v = np.zeros(self.p)
            for j in range(k):
                v[j] = -U[j, k] / U[j, j]
            v[k] = 1.0
            #           If there are still points to choose from, find if points meet criterion. If so, use the index to choose
            #           point with given index to be next point in regression/interpolation set
            if S_hat.size != 0:
                M = self._natural_basis_matrix(S_hat, v, phi_function)
                index2 = np.argmax(M)
                if M[index2] < psi:
                    index2 = None
            else:
                index2 = None
#           If index exists, choose the point with that index and delete it from possible choices
            if index2 is not None:
                s = S_hat[index2, :].flatten()
                S = np.vstack((S, s))
                f = np.vstack((f, f_hat[index2].flatten()))
                S_hat = np.delete(S_hat, index2, 0)
                f_hat = np.delete(f_hat, index2, 0)
                phi = phi_function(s.flatten())
#           If index doesn't exist, solve an optimisation point to find the point in the range which best satisfies criterion
            else:
                s = optimize.minimize(
                    lambda x: -abs(np.dot(v, phi_function(x.flatten()))),
                    np.zeros(self.n),
                    method='COBYLA',
                    constraints=[{
                        'type': 'ineq',
                        'fun': lambda x: 1.0 - x
                    }, {
                        'type': 'ineq',
                        'fun': lambda x: 1.0 + x
                    }],
                    options={'disp': False})['x'].flatten()
                S = np.vstack((S, s))
                f = np.vstack((f, np.array([np.inf])))
                phi = phi_function(s.flatten())
#           Update U factorisation in LU algorithm
            U[k, k] = np.dot(v, phi)
            for i in range(k + 1, self.p):
                U[k, i] += phi[i]
                for j in range(k):
                    U[k, i] -= (phi[j] * U[j, i]) / U[j, j]
        return S, f, S_hat, f_hat
Example #19
    def __init__(self, poly, correlation_matrix, verbose=False):
        self.poly = poly
        D = self.poly.get_parameters()
        self.D = D
        self.R = correlation_matrix
        self.std = Parameter(order=5,
                             distribution='normal',
                             shape_parameter_A=0.0,
                             shape_parameter_B=1.0)
        inf_lim = -8.0
        sup_lim = -inf_lim
        p1 = Parameter(distribution='uniform',
                       lower=inf_lim,
                       upper=sup_lim,
                       order=31)
        myBasis = Basis('tensor-grid')
        Pols = Poly([p1, p1], myBasis, method='numerical-integration')
        p = Pols.get_points()
        w = Pols.get_weights() * (sup_lim - inf_lim)**2
        p1 = p[:, 0]
        p2 = p[:, 1]
        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i + 1, len(self.D), 1):
                if self.R[i, j] == 0:
                    R0[i, j] = 0.0
                else:
                    tp11 = -(np.array(self.D[i].get_icdf(
                        self.std.get_cdf(points=p1))) -
                             self.D[i].mean) / np.sqrt(self.D[i].variance)
                    tp22 = -(np.array(self.D[j].get_icdf(
                        self.std.get_cdf(points=p2))) -
                             self.D[j].mean) / np.sqrt(self.D[j].variance)

                    rho_ij = self.R[i, j]
                    bivariateNormalPDF = (
                        1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                        np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                               (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                    coefficientsIntegral = np.flipud(tp11 * tp22 * w)

                    def check_difference(rho_ij):
                        bivariateNormalPDF = (
                            1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                            np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                                   (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                        diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                        return diff - self.R[i, j]

                    if (self.D[i].name != 'custom') or (self.D[j].name !=
                                                        'custom'):
                        rho = optimize.newton(check_difference,
                                              self.R[i, j],
                                              maxiter=50)
                    else:
                        res = optimize.least_squares(check_difference,
                                                     self.R[i, j],
                                                     bounds=(-0.999, 0.999),
                                                     ftol=1.e-03)
                        rho = res.x
                        print('A Custom Marginal is present')

                    R0[i, j] = rho
                    R0[j, i] = R0[i, j]

        self.A = np.linalg.cholesky(R0)
        if verbose is True:
            print('The Cholesky decomposition of fictive matrix R0 is:')
            print(self.A)
            print('The fictive matrix is:')
            print(R0)
        list_of_parameters = []
        for i in range(0, len(self.D)):
            standard_parameter = Parameter(order=self.D[i].order,
                                           distribution='gaussian',
                                           shape_parameter_A=0.,
                                           shape_parameter_B=1.)
            list_of_parameters.append(standard_parameter)
        self.polystandard = deepcopy(self.poly)
        self.polystandard._set_parameters(list_of_parameters)
        self.standard_samples = self.polystandard.get_points()
        self._points = self.get_correlated_from_uncorrelated(
            self.standard_samples)
Example #20
    def __init__(self,
                 sample_points,
                 sample_outputs,
                 num_ridges,
                 max_iters=1,
                 learning_rate=0.001,
                 W=None,
                 coeffs=None,
                 momentum_rate=.001,
                 opt='sd',
                 poly_deg=2,
                 verbose=False):
        self.sample_points = sample_points
        self.sample_outputs = sample_outputs
        self.verbose = verbose
        # network architecture params
        if isinstance(num_ridges, int):
            self.num_ridges = [num_ridges]
        else:
            self.num_ridges = num_ridges
        # num_ridges is the number of hidden units at each hidden layer. Does not count the input layer
        self.num_layers = len(self.num_ridges)
        self.dims = sample_points.shape[1]
        # initialize network data structures
        max_layer_size = max(self.num_ridges)
        self.poly_array = np.empty((self.num_layers, max_layer_size),
                                   dtype=object)
        #TODO: not hardcode poly type? Have different ridges at different nodes?
        for k in range(self.num_layers):
            for j in range(self.num_ridges[k]):
                self.poly_array[k, j] = Poly(
                    Parameter(order=poly_deg,
                              distribution='uniform',
                              lower=-3,
                              upper=3), Basis("total-order"))
        self.poly_card = self.poly_array[0, 0].basis.cardinality
        layer_sizes = [self.dims] + self.num_ridges
        if W is None:
            self.W = [
                np.random.randn(layer_sizes[k + 1], layer_sizes[k])
                for k in range(self.num_layers)
            ]
        else:
            self.W = W
        if coeffs is None:
            self.coeffs = [
                np.random.randn(self.num_ridges[k], self.poly_card)
                for k in range(self.num_layers)
            ]
        else:
            self.coeffs = coeffs

        self.update_coeffs()
        # Note: We will keep data for every input point in one array.
        n_points = self.sample_points.shape[0]
        self.delta = []
        for k in range(self.num_layers):
            self.delta.append(np.zeros((self.num_ridges[k], n_points)))
        self.act_mat = []  # Lambda
        for k in range(self.num_layers):
            self.act_mat.append(np.zeros((self.num_ridges[k], n_points)))
        self.Z = []  # node value before activation
        for k in range(self.num_layers):
            self.Z.append(np.zeros((self.num_ridges[k], n_points)))
        self.Y = []  # After activation
        for k in range(self.num_layers):
            self.Y.append(np.zeros((self.num_ridges[k], n_points)))
        self.phi = []  # basis fn evaluations
        for k in range(self.num_layers):
            self.phi.append(np.zeros((self.num_ridges[k], n_points)))
        self.evaluate_fit(self.sample_points, train=True)
        # optimization params
        self.max_iters = max_iters
        self.opt = opt
        self.learning_rate = learning_rate
        self.momentum_rate = momentum_rate
Example #21
    def _omorf(self, s_old, del_k, del_min, eta1, eta2, gam1, gam2, omega_s,
               max_evals, random_initial, epsilon, d, subspace_method):
        """
        Computes optimum using the ``omorf`` method
        """
        self.n = s_old.size
        self.s_old = self._apply_scaling(s_old)
        if del_k is None:
            if self.bounds is None:
                self.del_k = 0.1 * max(np.linalg.norm(self.s_old, ord=np.inf),
                                       1.0)
            else:
                self.del_k = 0.1
        else:
            self.del_k = del_k
        self._update_bounds()
        self.f_old = self._blackbox_evaluation(self.s_old)

        self.d = d
        self.q = int(comb(self.d + 2, 2))
        self.p = self.n + 1
        self.random_initial = random_initial
        self.subspace_method = subspace_method
        self.epsilon = epsilon

        Base = Basis('total-order', orders=np.tile([2], self.d))
        self.basis = Base.get_basis()[:, range(self.d - 1, -1, -1)]

        itermax = 10000
        # Construct the sample set
        S_full, f_full = self._generate_initial_set()
        self._calculate_subspace(S_full, f_full)
        S_red, f_red = self._sample_set('new')
        for i in range(itermax):
            # self._update_bounds()
            if len(self.f) >= max_evals or self.del_k < del_min:
                break
            my_poly = self._build_model(S_red, f_red)
            m_old = np.asscalar(my_poly.get_polyfit(np.dot(self.s_old,
                                                           self.U)))
            s_new, m_new = self._compute_step(my_poly)
            # Safety step implemented in BOBYQA
            if np.linalg.norm(s_new - self.s_old,
                              ord=np.inf) < omega_s * self.del_k:
                if max(np.linalg.norm(
                        S_full - self.s_old, axis=1,
                        ord=np.inf)) <= self.epsilon * self.del_k:
                    self._calculate_subspace(S_full, f_full)
                    S_red, f_red = self._sample_set('new')
                    self.del_k *= gam1
                elif max(np.linalg.norm(
                        S_red - self.s_old, axis=1,
                        ord=np.inf)) <= self.epsilon * self.del_k:
                    S_full, f_full = self._sample_set('improve',
                                                      S_full,
                                                      f_full,
                                                      full_space=True)
                    self._calculate_subspace(S_full, f_full)
                    S_red, f_red = self._sample_set('new')
                else:
                    S_red, f_red = self._sample_set('improve', S_red, f_red)
                    S_full, f_full = self._sample_set('improve',
                                                      S_full,
                                                      f_full,
                                                      full_space=True)
                continue
            if self.S.shape == np.unique(np.vstack((self.S, s_new)),
                                         axis=0).shape:
                ind_repeat = np.argmin(
                    np.linalg.norm(self.S - s_new, ord=np.inf, axis=1))
                f_new = self.f[ind_repeat]
            else:
                f_new = self._blackbox_evaluation(s_new)
            S_red = np.vstack((S_red, s_new))
            f_red = np.vstack((f_red, f_new))
            S_full = np.vstack((S_full, s_new))
            f_full = np.vstack((f_full, f_new))
            # Calculate trust-region factor
            rho_k = (self.f_old - f_new) / (m_old - m_new)
            self._choose_best(self.S, self.f)
            self._update_bounds()
            if len(self.f) >= max_evals or self.del_k < del_min:
                break
            if rho_k >= eta2:
                S_red, f_red = self._sample_set('replace', S_red, f_red)
                S_full, f_full = self._sample_set('replace', S_full, f_full)
                self.del_k *= gam2
            elif rho_k >= eta1:
                S_red, f_red = self._sample_set('replace', S_red, f_red)
                S_full, f_full = self._sample_set('replace', S_full, f_full)
            else:
                if max(np.linalg.norm(
                        S_full - self.s_old, axis=1,
                        ord=np.inf)) <= self.epsilon * self.del_k:
                    self._calculate_subspace(S_full, f_full)
                    S_red, f_red = self._sample_set('new')
                    self.del_k *= gam1
                elif max(np.linalg.norm(
                        S_red - self.s_old, axis=1,
                        ord=np.inf)) <= self.epsilon * self.del_k:
                    S_full, f_full = self._sample_set('improve',
                                                      S_full,
                                                      f_full,
                                                      full_space=True)
                    self._calculate_subspace(S_full, f_full)
                    S_red, f_red = self._sample_set('new')
                else:
                    S_red, f_red = self._sample_set('improve', S_red, f_red)
                    S_full, f_full = self._sample_set('improve',
                                                      S_full,
                                                      f_full,
                                                      full_space=True)
        self.S = self._remove_scaling(self.S)
        self._choose_best(self.S, self.f)
        return self.s_old, self.f_old
Example #22
    def __init__(self,
                 correlation_matrix,
                 poly=None,
                 parameters=None,
                 method=None,
                 verbose=False):
        if (poly is None) and (method is not None):
            raise ValueError('Need to specify poly for probability transform.')
        if poly is not None:
            self.poly = poly
            D = self.poly.get_parameters()
        elif parameters is not None:
            D = parameters
        else:
            raise ValueError('Need to specify either poly or parameters.')
        self.D = D
        self.R = correlation_matrix
        self.std = Parameter(order=5,
                             distribution='normal',
                             shape_parameter_A=0.0,
                             shape_parameter_B=1.0)
        inf_lim = -8.0
        sup_lim = -inf_lim
        p1 = Parameter(distribution='uniform',
                       lower=inf_lim,
                       upper=sup_lim,
                       order=31)
        myBasis = Basis('tensor-grid')
        self.Pols = Poly([p1, p1], myBasis, method='numerical-integration')
        Pols = self.Pols
        p = Pols.get_points()
        # w = Pols.get_weights()
        w = Pols.get_weights() * (sup_lim - inf_lim)**2
        p1 = p[:, 0]
        p2 = p[:, 1]
        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i + 1, len(self.D), 1):
                if self.R[i, j] == 0:
                    R0[i, j] = 0.0
                else:
                    z1 = np.array(self.D[i].get_icdf(
                        self.std.get_cdf(points=p1)))
                    z2 = np.array(self.D[j].get_icdf(
                        self.std.get_cdf(points=p2)))

                    tp11 = (z1 - self.D[i].mean) / np.sqrt(self.D[i].variance)
                    tp22 = (z2 - self.D[j].mean) / np.sqrt(self.D[j].variance)

                    coefficientsIntegral = np.flipud(tp11 * tp22 * w)

                    def check_difference(rho_ij):
                        bivariateNormalPDF = (
                            1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                            np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                                   (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                        diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                        return diff - self.R[i, j]

                    # if (self.D[i].name!='custom') or (self.D[j].name!='custom'):
                    rho = optimize.newton(check_difference,
                                          self.R[i, j],
                                          maxiter=50)
                    # else:
                    #     # ???
                    #     res = optimize.least_squares(check_difference, self.R[i,j], bounds=(-0.999,0.999), ftol=1.e-03)
                    #     rho = res.x
                    #     print('A Custom Marginal is present')

                    R0[i, j] = rho
                    R0[j, i] = R0[i, j]
        self.R0 = R0.copy()

        self.A = np.linalg.cholesky(R0)
        if verbose:
            print('The Cholesky decomposition of fictive matrix R0 is:')
            print(self.A)
            print('The fictive matrix is:')
            print(R0)

        if method is None:
            pass
        elif method.lower() == 'nataf-transform':
            list_of_parameters = []
            for i in range(0, len(self.D)):
                standard_parameter = Parameter(order=self.D[i].order,
                                               distribution='gaussian',
                                               shape_parameter_A=0.,
                                               shape_parameter_B=1.)
                list_of_parameters.append(standard_parameter)

            # have option so that we don't need to obtain
            self.corrected_poly = deepcopy(self.poly)

            if hasattr(self.corrected_poly, '_quadrature_points'):
                self.corrected_poly._set_parameters(list_of_parameters)
                self.standard_samples = self.corrected_poly._quadrature_points
                self._points = self.get_correlated_samples(
                    X=self.standard_samples)
                # self.corrected_poly._quadrature_points = self._points.copy()
        elif method.lower() == 'gram-schmidt':
            basis_card = poly.basis.cardinality
            oversampling = 10

            N_Psi = oversampling * basis_card
            S_samples = self.get_correlated_samples(N=N_Psi)
            w_weights = 1.0 / N_Psi * np.ones(N_Psi)
            Psi = poly.get_poly(S_samples).T
            WPsi = np.diag(np.sqrt(w_weights)) @ Psi
            self.WPsi = WPsi

            R_Psi = np.linalg.qr(WPsi)[1]

            self.R_Psi = R_Psi
            self.R_Psi[0, :] *= np.sign(self.R_Psi[0, 0])
            self.corrected_poly = deepcopy(poly)
            self.corrected_poly.inv_R_Psi = np.linalg.inv(self.R_Psi)
            self.corrected_poly.corr = self
            self.corrected_poly._set_points_and_weights()

            P = self.corrected_poly.get_poly(
                self.corrected_poly._quadrature_points)
            W = np.mat(
                np.diag(np.sqrt(self.corrected_poly._quadrature_weights)))
            A = W * P.T
            self.corrected_poly.A = A
            self.corrected_poly.P = P

            if hasattr(self.corrected_poly, '_quadrature_points'):
                # TODO: Correlated quadrature points?
                self._points = self.corrected_poly._quadrature_points
        else:
            raise ValueError('Invalid method for correlations.')
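A hedged usage sketch for the constructor above. The class name is not shown in the snippet; it is assumed here to be equadratures' Correlations class, and the Poly construction follows Example #19:

import numpy as np
from equadratures import Parameter, Basis, Poly, Correlations

params = [Parameter(distribution='gaussian', shape_parameter_A=0.0,
                    shape_parameter_B=1.0, order=3) for _ in range(2)]
mypoly = Poly(params, Basis('tensor-grid'), method='numerical-integration')
R = np.array([[1.0, 0.6],
              [0.6, 1.0]])
corr = Correlations(R, poly=mypoly, method='gram-schmidt')
corrected_poly = corr.corrected_poly                  # Poly with the correlation-corrected basis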
Example #23
    def fit(self, X, y):
        """
                Fits the tree to the provided data

                :param PolyTree self:
                    An instance of the PolyTree class.
                :param numpy.ndarray X:
                        Training input data
                :param numpy.ndarray y:
                        Training output data
                """
        def _build_tree():

            global index_node_global

            def _splitter(node):
                # Extract data
                X, y = node["data"]
                depth = node["depth"]
                N, d = X.shape

                # Find feature splits that might improve loss
                did_split = False
                if self.splitting_criterion == "model_aware":
                    loss_best = node["loss"]
                elif self.splitting_criterion == "model_agnostic" or self.splitting_criterion == "loss_gradient":
                    loss_best = np.inf
                else:
                    raise Exception("invalid splitting_criterion")
                data_best = None
                polys_best = None
                j_feature_best = None
                threshold_best = None

                if self.verbose:
                    polys_fit = 0

                # Perform threshold split search only if node has not hit max depth
                if (depth >= 0) and (depth < self.max_depth):
                    if self.splitting_criterion != "loss_gradient":

                        for j_feature in range(d):

                            last_threshold = np.inf

                            if self.search == 'exhaustive':
                                threshold_search = X[:, j_feature]
                            elif self.search == 'grid':
                                if self.samples > N:
                                    samples = N
                                else:
                                    samples = self.samples
                                threshold_search = np.linspace(
                                    np.min(X[:, j_feature]),
                                    np.max(X[:, j_feature]),
                                    num=samples)
                            else:
                                raise Exception(
                                    'Incorrect search type! Must be \'exhaustive\' or \'grid\''
                                )

                            # Perform threshold split search on j_feature
                            for threshold in np.unique(
                                    np.sort(threshold_search)):

                                # Split data based on threshold
                                (X_left, y_left), (X_right,
                                                   y_right) = self._split_data(
                                                       j_feature, threshold, X,
                                                       y)
                                #print(j_feature, threshold, X_left, X_right)
                                N_left, N_right = len(X_left), len(X_right)

                                # Do not attempt to split if split conditions not satisfied
                                if not (N_left >= self.min_samples_leaf
                                        and N_right >= self.min_samples_leaf):
                                    continue

                                # Compute weight loss function
                                if self.splitting_criterion == "model_aware":
                                    loss_left, poly_left = _fit_poly(
                                        X_left, y_left)
                                    loss_right, poly_right = _fit_poly(
                                        X_right, y_right)

                                    loss_split = (N_left * loss_left +
                                                  N_right * loss_right) / N

                                    if self.verbose: polys_fit += 2

                                elif self.splitting_criterion == "model_agnostic":
                                    loss_split = np.std(
                                        y) - (N_left * np.std(y_left) +
                                              N_right * np.std(y_right)) / N

                                # Update best parameters if loss is lower
                                if loss_split < loss_best:
                                    did_split = True
                                    loss_best = loss_split
                                    if self.splitting_criterion == "model_aware":
                                        polys_best = [poly_left, poly_right]
                                    data_best = [(X_left, y_left),
                                                 (X_right, y_right)]
                                    j_feature_best = j_feature
                                    threshold_best = threshold

                    # Gradient based splitting criterion from ref. [2]
                    else:
                        # Fit a single poly to parent node
                        loss, poly = _fit_poly(X, y)

                        # Now run the splitting algo using gradients from this poly
                        did_split, j_feature_best, threshold_best = self._find_split_from_grad(
                            poly, X, y.reshape(-1, 1))

                # If model_agnostic or gradient based, fit poly's to children now we have split
                if self.splitting_criterion != "model_aware" and did_split:
                    (X_left, y_left), (X_right, y_right) = self._split_data(
                        j_feature_best, threshold_best, X, y)
                    loss_left, poly_left = _fit_poly(X_left, y_left)
                    loss_right, poly_right = _fit_poly(X_right, y_right)
                    N_left, N_right = len(X_left), len(X_right)
                    loss_best = (N_left * loss_left + N_right * loss_right) / N
                    polys_best = [poly_left, poly_right]
                    if self.splitting_criterion == "loss_gradient":
                        data_best = [(X_left, y_left), (X_right, y_right)]

                    if self.verbose: polys_fit += 2

                if self.verbose and did_split:
                    print(
                        "Node (X.shape = {}) fitted with {} polynomials generated"
                        .format(X.shape, polys_fit))
                elif self.verbose:
                    print(
                        "Node (X.shape = {}) failed to fit after {} polynomials generated"
                        .format(X.shape, polys_fit))

                if did_split and depth > self.actual_max_depth:
                    self.actual_max_depth = depth

                # Return the best result
                result = {
                    "did_split": did_split,
                    "loss": loss_best,
                    "polys": polys_best,
                    "data": data_best,
                    "j_feature": j_feature_best,
                    "threshold": threshold_best,
                    "N": N
                }

                return result

            def _fit_poly(X, y):

                #                                try:

                N, d = X.shape
                myParameters = []

                for dimension in range(d):
                    values = X[:, dimension]
                    values_min = np.amin(values)
                    values_max = np.amax(values)

                    if (values_min - values_max)**2 < 0.01:
                        myParameters.append(
                            Parameter(distribution='Uniform',
                                      lower=values_min - 0.01,
                                      upper=values_max + 0.01,
                                      order=self.order))
                    else:
                        myParameters.append(
                            Parameter(distribution='Uniform',
                                      lower=values_min,
                                      upper=values_max,
                                      order=self.order))
                if self.basis == "hyperbolic-basis":
                    myBasis = Basis(self.basis,
                                    orders=[self.order for _ in range(d)],
                                    q=0.5)
                else:
                    myBasis = Basis(self.basis,
                                    orders=[self.order for _ in range(d)])
                container["index_node_global"] += 1
                poly = Poly(myParameters,
                            myBasis,
                            method=self.poly_method,
                            sampling_args={
                                'sample-points': X,
                                'sample-outputs': y
                            },
                            solver_args=self.poly_solver_args)
                poly.set_model()

                mse = np.linalg.norm(y -
                                     poly.get_polyfit(X).reshape(-1))**2 / N
                #                                except Exception as e:
                #                                        print("Warning fitting of Poly failed:", e)
                #                                        print(d, values_min, values_max)
                #                                        mse, poly = np.inf, None

                return mse, poly

            def _create_node(X, y, depth, container):
                poly_loss, poly = _fit_poly(X, y)

                node = {
                    "name": "node",
                    "index": container["index_node_global"],
                    "loss": poly_loss,
                    "poly": poly,
                    "data": (X, y),
                    "n_samples": len(X),
                    "j_feature": None,
                    "threshold": None,
                    "children": {
                        "left": None,
                        "right": None
                    },
                    "depth": depth,
                    "flag": False
                }
                container["index_node_global"] += 1

                return node

            def _split_traverse_node(node, container):

                result = _splitter(node)
                if not result["did_split"]:
                    return

                node["j_feature"] = result["j_feature"]
                node["threshold"] = result["threshold"]

                del node["data"]

                (X_left, y_left), (X_right, y_right) = result["data"]
                poly_left, poly_right = result["polys"]

                node["children"]["left"] = _create_node(
                    X_left, y_left, node["depth"] + 1, container)
                node["children"]["right"] = _create_node(
                    X_right, y_right, node["depth"] + 1, container)
                node["children"]["left"]["poly"] = poly_left
                node["children"]["right"]["poly"] = poly_right

                # Split nodes
                _split_traverse_node(node["children"]["left"], container)
                _split_traverse_node(node["children"]["right"], container)

            container = {"index_node_global": 0}
            root = _create_node(X, y, 0, container)
            _split_traverse_node(root, container)

            return root

        N, d = X.shape
        if self.basis == "hyperbolic-basis":
            self.cardinality = Basis(self.basis,
                                     orders=[self.order for _ in range(d)],
                                     q=0.5).get_cardinality()
        else:
            self.cardinality = Basis(self.basis,
                                     orders=[self.order for _ in range(d)
                                             ]).get_cardinality()
        if self.min_samples_leaf is None or self.min_samples_leaf == self.cardinality:
            self.min_samples_leaf = int(np.ceil(self.cardinality * 1.25))
        elif self.cardinality > self.min_samples_leaf:
            print(
                "WARNING: Basis cardinality ({}) greater than the minimum samples per leaf ({}). This may cause reduced performance."
                .format(self.cardinality, self.min_samples_leaf))

        self.tree = _build_tree()
Example #24
    def __init__(self,
                 method,
                 full_space_poly=None,
                 sample_points=None,
                 sample_outputs=None,
                 subspace_dimension=2,
                 polynomial_degree=2,
                 param_args=None,
                 poly_args=None,
                 dr_args=None):
        self.full_space_poly = full_space_poly
        self.sample_points = sample_points
        self.Y = None  # for the zonotope vertices
        self.sample_outputs = sample_outputs
        self.method = method
        self.subspace_dimension = subspace_dimension
        self.polynomial_degree = polynomial_degree

        my_poly_args = {'method': 'least-squares', 'solver_args': {}}
        if poly_args is not None:
            my_poly_args.update(poly_args)
        self.poly_args = my_poly_args

        my_param_args = {
            'distribution': 'uniform',
            'order': self.polynomial_degree,
            'lower': -1,
            'upper': 1
        }
        if param_args is not None:
            my_param_args.update(param_args)

        # I suppose we can detect whether lower and upper are present to decide between these categories?
        bounded_distrs = [
            'analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian',
            'uniform'
        ]
        unbounded_distrs = [
            'gaussian', 'normal', 'gumbel', 'logistic', 'students-t',
            'studentst'
        ]
        semi_bounded_distrs = [
            'chi', 'chi-squared', 'exponential', 'gamma', 'lognormal',
            'log-normal', 'pareto', 'rayleigh', 'weibull'
        ]

        if dr_args is not None:
            if 'standardize' in dr_args:
                dr_args['standardise'] = dr_args['standardize']

        if self.method.lower() in ('active-subspace', 'active-subspaces'):
            self.method = 'active-subspace'
            if dr_args is not None:
                # dr_args is a dict, so use .get() rather than getattr()
                self.standardise = dr_args.get('standardise', True)
            else:
                self.standardise = True

            if self.full_space_poly is None:
                # user provided input/output data
                N, d = self.sample_points.shape
                if self.standardise:
                    self.data_scaler = scaler_minmax()
                    self.data_scaler.fit(self.sample_points)
                    self.std_sample_points = self.data_scaler.transform(
                        self.sample_points)
                else:
                    self.std_sample_points = self.sample_points.copy()
                param = Parameter(**my_param_args)
                if param_args is not None:
                    # param_args is a dict, so check its keys ('lower'/'upper');
                    # hasattr() on a dict would never find them
                    if ('lower' in param_args
                            or 'upper' in param_args) and self.standardise:
                        warnings.warn(
                            'Points standardised but parameter range provided. Overriding default ([-1,1])...',
                            UserWarning)
                myparameters = [param for _ in range(d)]
                mybasis = Basis("total-order")
                mypoly = Poly(myparameters,
                              mybasis,
                              sampling_args={
                                  'sample-points': self.std_sample_points,
                                  'sample-outputs': self.sample_outputs
                              },
                              **my_poly_args)
                mypoly.set_model()
                self.full_space_poly = mypoly
            else:
                # User provided polynomial
                # Standardise according to distribution specified. Only care about the scaling (not shift)
                # TODO: user provided callable with parameters?
                user_params = self.full_space_poly.parameters
                d = len(user_params)
                self.sample_points = self.full_space_poly.get_points()
                if self.standardise:
                    scale_factors = np.zeros(d)
                    centers = np.zeros(d)
                    for dd, p in enumerate(user_params):
                        if p.name.lower() in bounded_distrs:
                            scale_factors[dd] = (p.upper - p.lower) / 2.0
                            centers[dd] = (p.upper + p.lower) / 2.0
                        elif p.name.lower() in unbounded_distrs:
                            scale_factors[dd] = np.sqrt(p.variance)
                            centers[dd] = p.mean
                        else:
                            scale_factors[dd] = np.sqrt(p.variance)
                            centers[dd] = 0.0
                    self.param_scaler = scaler_custom(centers, scale_factors)
                    self.std_sample_points = self.param_scaler.transform(
                        self.sample_points)
                else:
                    self.std_sample_points = self.sample_points.copy()
                if not hasattr(self.full_space_poly, 'coefficients'):
                    raise ValueError('Please call set_model() first on poly.')

            self.sample_outputs = self.full_space_poly.get_model_evaluations()
            # TODO: use dr_args for resampling of gradient points
            as_args = {'grad_points': None}
            if dr_args is not None:
                as_args.update(dr_args)
            self._get_active_subspace(**as_args)
        elif self.method == 'variable-projection':
            self.data_scaler = scaler_minmax()
            self.data_scaler.fit(self.sample_points)
            self.std_sample_points = self.data_scaler.transform(
                self.sample_points)

            if dr_args is not None:
                vp_args = {
                    'gamma': 0.1,
                    'beta': 1e-4,
                    'tol': 1e-7,
                    'maxiter': 1000,
                    'U0': None,
                    'verbose': False
                }
                vp_args.update(dr_args)
                self._get_variable_projection(**vp_args)
            else:
                self._get_variable_projection()
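
# --- Sketch (not part of the original example) ------------------------------
# Minimal illustration of constructing the Subspaces object defined above
# directly from input/output data (the 'active-subspace' branch). The import
# path and the get_subspace() call are assumed from the library's public
# interface; the data are synthetic.
import numpy as np
from equadratures import Subspaces   # assumed import path

X = np.random.uniform(-1.0, 1.0, (300, 5))        # sample_points
y = ((X @ np.ones(5)) ** 2).reshape(-1, 1)        # sample_outputs (a simple ridge function)
subs = Subspaces(method='active-subspace', sample_points=X, sample_outputs=y,
                 subspace_dimension=2, polynomial_degree=2)
W = subs.get_subspace()[:, :2]                    # reduced-space directions
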
    def _trust_region(self, s_old, del_k, del_min, eta1, eta2, gam1, gam2, omega_s, max_evals, random_initial, scale_bounds, epsilon):
        """
        Computes optimum using the ``trust-region`` method
        """
        itermax = 10000
        self.n = s_old.size
        self.q = int(comb(self.n+2, 2))
        self.p = int(comb(self.n+2, 2))
        self.random_initial = random_initial
        self.scale_bounds = scale_bounds
        self.epsilon = epsilon
        Base = Basis('total-order', orders=np.tile([2], self.n))
        self.basis = Base.get_basis()[:,range(self.n-1, -1, -1)]

        self.s_old = self._apply_scaling(s_old)
        self.f_old = self._blackbox_evaluation(self.s_old)
        if del_k is None:
            if self.bounds is None:
                self.del_k = 0.1*max(np.linalg.norm(self.s_old, ord=np.inf), 1.0)
            else:
                self.del_k = 0.1
        else:
            self.del_k = del_k
        self._update_bounds()

        # Construct the sample set
        S, f = self._generate_initial_set()
        for i in range(itermax):
            self._update_bounds()
            if len(self.f) >= max_evals or self.del_k < del_min:
                break
            my_poly = self._build_model(S, f)
            m_old = my_poly.get_polyfit(self.s_old).item()  # scalar model value (np.asscalar is removed in modern NumPy)
            s_new, m_new = self._compute_step(my_poly)
            # Safety step implemented in BOBYQA
            if np.linalg.norm(s_new - self.s_old, ord=np.inf) < omega_s*self.del_k:
                S, f = self._sample_set('improve', S, f)
                if max(np.linalg.norm(S-self.s_old, axis=1, ord=np.inf)) <= self.epsilon*self.del_k:
                    self.del_k *= gam1
                continue
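            # s_new coincides with an already-sampled point: reuse its stored
            # evaluation rather than querying the blackbox again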
            elif self.S.shape == np.unique(np.vstack((self.S, s_new)), axis=0).shape:
                ind_repeat = np.argmin(np.linalg.norm(self.S - s_new, ord=np.inf, axis=1))
                f_new = self.f[ind_repeat]
            else:
                f_new = self._blackbox_evaluation(s_new)
            S = np.vstack((S, s_new))
            f = np.vstack((f, f_new))
            # Calculate trust-region factor
            rho_k = (self.f_old - f_new) / (m_old - m_new)
            self._choose_best(self.S, self.f)
            self._update_bounds()
            if len(self.f) >= max_evals or self.del_k < del_min:
                break
            if rho_k >= eta2:
                S, f = self._sample_set('replace', S, f)
                self.del_k *= gam2
            elif rho_k >= eta1:
                S, f = self._sample_set('replace', S, f)
            else:
                if max(np.linalg.norm(S-self.s_old, axis=1, ord=np.inf)) <= self.epsilon*self.del_k:
                    S, f = self._sample_set('improve', S, f)
                    self.del_k *= gam1
                else:
                    S, f = self._sample_set('improve', S, f)
        self.S = self._remove_scaling(self.S)
        self._choose_best(self.S, self.f)
        return self.s_old, self.f_old
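
# --- Sketch (not part of the original example) ------------------------------
# The acceptance logic used in the loop above, isolated for clarity: rho_k is
# the ratio of actual to predicted reduction, eta1/eta2 are acceptance
# thresholds and gam1/gam2 the contraction/expansion factors. This simplified
# helper ignores the poisedness check (the epsilon*del_k test) applied in the
# full method before shrinking; the default values are illustrative only.
def _radius_update(del_k, rho_k, eta1=0.1, eta2=0.7, gam1=0.5, gam2=2.0):
    if rho_k >= eta2:
        # Very successful step: the model predicted the reduction well,
        # so expand the trust region.
        return del_k * gam2
    elif rho_k >= eta1:
        # Successful step: keep the current radius.
        return del_k
    # Unsuccessful step: shrink the trust region.
    return del_k * gam1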