def _calculate_subspace(self, S, f):
     parameters = [Parameter(distribution='uniform', lower=np.min(S[:,i]), upper=np.max(S[:,i]), order=1) for i in range(0, self.n)]
     self.poly = Poly(parameters, basis=Basis('total-order'), method='least-squares', \
                  sampling_args={'sample-points': S, 'sample-outputs': f})
     self.poly.set_model()
     self.Subs = Subspaces(full_space_poly=self.poly, method='active-subspace', subspace_dimension=self.d)
     if self.subspace_method == 'variable-projection':
         U0 = self.Subs.get_subspace()[:,:self.d]
         self.Subs = Subspaces(method='variable-projection', sample_points=S, sample_outputs=f, \
                 subspace_init=U0, subspace_dimension=self.d, polynomial_degree=2, max_iter=300, tol=1.0e-8)
         self.U = self.Subs.get_subspace()[:, :self.d]
     elif self.subspace_method == 'active-subspaces':
         U0 = self.Subs.get_subspace()[:,0].reshape(-1,1) # leading active-subspace direction
         U1 = null_space(U0.T)
         self.U = U0
         for i in range(self.d-1):
             R = []
             for j in range(U1.shape[1]):
                 U = np.hstack((self.U, U1[:, j].reshape(-1,1)))
                 Y = np.dot(S, U)
                 myParameters = [Parameter(distribution='uniform', lower=np.min(Y[:,k]), upper=np.max(Y[:,k]), \
                         order=2) for k in range(Y.shape[1])]
                 myBasis = Basis('total-order')
                 poly = Poly(myParameters, myBasis, method='least-squares', \
                         sampling_args={'sample-points':Y, 'sample-outputs':f})
                 poly.set_model()
                 f_eval = poly.get_polyfit(Y)
                 _,_,r,_,_ = linregress(f_eval.flatten(),f.flatten()) 
                 R.append(r**2)
             index = np.argmax(R)
             self.U = np.hstack((self.U, U1[:, index].reshape(-1,1)))
             U1 = np.delete(U1, index, 1)
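
Note: the greedy loop above ranks each candidate direction by the R^2 between a low-dimensional quadratic fit and the true outputs. Below is a minimal standalone sketch of that scoring step, assuming the equadratures-style Parameter/Basis/Poly API used throughout this listing (the import path is an assumption):

import numpy as np
from scipy.stats import linregress
from equadratures import Parameter, Basis, Poly  # assumed import path

def ridge_r_squared(S, f, U):
    # Project the samples onto the candidate subspace and fit a quadratic ridge.
    Y = np.dot(S, U)
    params = [Parameter(distribution='uniform', lower=np.min(Y[:, k]),
                        upper=np.max(Y[:, k]), order=2) for k in range(Y.shape[1])]
    poly = Poly(params, Basis('total-order'), method='least-squares',
                sampling_args={'sample-points': Y, 'sample-outputs': f})
    poly.set_model()
    # Coefficient of determination between ridge predictions and true outputs.
    _, _, r, _, _ = linregress(poly.get_polyfit(Y).flatten(), f.flatten())
    return r**2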
Example no. 2
			def _fit_poly(X, y):

				try:
					N, d = X.shape
					myParameters = []

					for dimension in range(d):
						values = X[:,dimension]
						values_min = np.amin(values)
						values_max = np.amax(values)

						if (values_min - values_max) ** 2 < 0.01:
							myParameters.append(Parameter(distribution='Uniform', lower=values_min-0.01, upper=values_max+0.01, order=self.order))
						else: 
							myParameters.append(Parameter(distribution='Uniform', lower=values_min, upper=values_max, order=self.order))
					if self.basis == "hyperbolic-basis":
						myBasis = Basis(self.basis, orders=[self.order for _ in range(d)], q=0.5)
					else:
						myBasis = Basis(self.basis, orders=[self.order for _ in range(d)])
					container["index_node_global"] += 1
					poly = Poly(myParameters, myBasis, method=self.poly_method, sampling_args={'sample-points':X, 'sample-outputs':y}, solver_args=self.poly_solver_args)
					poly.set_model()
					
					mse = np.linalg.norm(y - poly.get_polyfit(X).reshape(-1)) ** 2 / N
				except Exception as e:
					print("Warning fitting of Poly failed:", e)
					print(d, values_min, values_max)
					mse, poly = np.inf, None

				return mse, poly
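
For context, here is a hedged standalone sketch of the same fitting pattern on synthetic data; the guard above simply pads the parameter bounds when a feature is nearly constant, so that the uniform Parameter has a non-degenerate range (import path assumed):

import numpy as np
from equadratures import Parameter, Basis, Poly  # assumed import path

np.random.seed(0)
X = np.random.uniform(-1.0, 1.0, (50, 2))
y = (X[:, 0]**2 + 0.5 * X[:, 1]).reshape(-1, 1)

params = [Parameter(distribution='Uniform', lower=np.amin(X[:, i]),
                    upper=np.amax(X[:, i]), order=2) for i in range(X.shape[1])]
poly = Poly(params, Basis('total-order'), method='least-squares',
            sampling_args={'sample-points': X, 'sample-outputs': y})
poly.set_model()
mse = np.linalg.norm(y.reshape(-1) - poly.get_polyfit(X).reshape(-1))**2 / X.shape[0]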
    def test_multi_variate_sampling(self):
        """
        test if the method returns a function object for sampling interface
        """
        dimension = 3
        sampling_ratio = 3
        parameters = [Parameter(1, "gaussian")] * dimension
        basis = Basis("total order", [5] * dimension)

        induced_sampling = InducedSampling(parameters, basis, sampling_ratio,
                                           "qr")

        # Mock univariate sampling
        def func(_input):
            if isinstance(_input, tuple) and len(_input) == 3:

                parameter = _input[0]
                cdf = _input[1]
                order = _input[2]
                assert parameter.__class__ == Parameter
                assert cdf.shape == (1, )
                assert cdf < 1 and cdf > 0
                # Check integer property
                assert order - int(order) < 0.0001
                assert order <= 5 and order >= 0
                assert type(np.asscalar(order)) == float
                return 1
            else:
                return 0

        induced_sampling.univariate_sampling = func
        quadrature_points = induced_sampling.samples()
        true_array = np.ones((dimension * sampling_ratio, dimension))
        assert_array_equal(quadrature_points, true_array)
Example no. 4
    def get_subspace_polynomial(self):
        """
        Returns a polynomial defined over the dimension reducing subspace.

        :param Subspaces self:
            An instance of the Subspaces object.

        :return:
            **subspacepoly**: A Poly object that defines a polynomial over the subspace. The distribution of parameters is
            assumed to be uniform, and the maximum and minimum bounds for each parameter are defined by the maximum and minimum values
            of the projected samples.
        """
        active_subspace = self._subspace[:, 0:self.subspace_dimension]
        projected_points = np.dot(self.sample_points, active_subspace)
        myparameters = []
        for i in range(0, self.subspace_dimension):
            param = Parameter(distribution='uniform', lower=np.min(projected_points[:,i]), upper=np.max(projected_points[:,i]), \
                order=self.polynomial_degree)
            myparameters.append(param)
        mybasis = Basis("total-order")
        subspacepoly = Poly(myparameters, mybasis, method=self.poly_method, sampling_args={'sample-points':projected_points, \
                                                                    'sample-outputs':self.sample_outputs},
                                                                    solver_args=self.solver_args)
        subspacepoly.set_model()
        return subspacepoly
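
A hedged usage sketch for the method above: build the dimension-reducing subspace from data, then query the subspace polynomial. Note that, per the constructor shown later in this listing, sample_points are standardised internally, so predictions should use the projected standardised points (import path assumed):

import numpy as np
from equadratures import Subspaces  # assumed import path

X = np.random.uniform(-1.0, 1.0, (250, 5))
f = np.exp(np.dot(X, np.ones((5, 1))) / np.sqrt(5))   # a one-dimensional ridge
subs = Subspaces(method='active-subspace', sample_points=X, sample_outputs=f,
                 subspace_dimension=1)
subpoly = subs.get_subspace_polynomial()
y_hat = subpoly.get_polyfit(np.dot(subs.sample_points, subs.get_subspace()[:, :1]))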
Example no. 5
 def convert2Full(poly_minimal):
     """
     Converts a Poly_minimal object to a full Poly object, running through the constructor.
     This gives some basic functionalities such as evaluating the polynomial fit and gradients.
     Note: This method is not compliant with the EQ v8.0 philosophy, and is just temporary.
     :param poly_minimal: A Poly_minimal object
     :return: poly object
     """
     basis = Basis(poly_minimal.basis_type, poly_minimal.basis_orders,
                   poly_minimal.basis_level, poly_minimal.basis_growth_rule,
                   poly_minimal.basis_q)
     num_params = len(poly_minimal.orders)
     parameters = []
     for i in range(num_params):
         parameters.append(
             Parameter(poly_minimal.orders[i],
                       poly_minimal.distributions[i],
                       endpoints=poly_minimal.endpoints[i],
                       shape_parameter_A=poly_minimal.shape_parameter_As[i],
                       shape_parameter_B=poly_minimal.shape_parameter_Bs[i],
                       lower=poly_minimal.lowers[i],
                       upper=poly_minimal.uppers[i],
                       data=poly_minimal.datas[i]))
     poly = Poly(parameters, basis)
     if hasattr(poly_minimal, 'coefficients'):
         poly.__setCoefficients__(poly_minimal.coefficients)
     if hasattr(poly_minimal, 'quadraturePoints'):
         poly.__setQuadrature__(poly_minimal.quadraturePoints,
                                poly_minimal.quadratureWeights)
     return poly
Example no. 6
 def get_subspace_polynomial(self):
     """ Returns a polynomial defined over the dimension reducing subspace.
     
     Returns
     -------
     Poly
         A Poly object that defines a polynomial over the subspace. The distribution of parameters
         is assumed to be uniform, and the maximum and minimum bounds for each parameter are defined by the maximum
         and minimum values of the projected samples.
     """
     # TODO: Try correlated poly here
     active_subspace = self._subspace[:, 0:self.subspace_dimension]
     projected_points = np.dot(self.std_sample_points, active_subspace)
     myparameters = []
     for i in range(0, self.subspace_dimension):
         param = Parameter(distribution='uniform',
                           lower=np.min(projected_points[:, i]),
                           upper=np.max(projected_points[:, i]),
                           order=self.polynomial_degree)
         myparameters.append(param)
     mybasis = Basis("total-order")
     subspacepoly = Poly(myparameters,
                         mybasis,
                         method='least-squares',
                         sampling_args={
                             'sample-points': projected_points,
                             'sample-outputs': self.sample_outputs
                         })
     subspacepoly.set_model()
     return subspacepoly
Example no. 7
def vandermonde(eta, p):
    """
    Internal function to variable_projection
    Calculates the Vandermonde matrix using polynomial basis functions

    :param eta: ndarray, the affine transformed projected values of inputs in active subspace
    :param p: int, the maximum degree of polynomials
    :return:
        * **V (numpy array)**: The resulting Vandermonde matrix
        * **Polybasis (Poly object)**: An instance of Poly object containing the polynomial basis derived
    """
    _, n = eta.shape
    listing = []
    for i in range(0, n):
        listing.append(p)
    Object = Basis('Total order', listing)
    #Establish n Parameter objects
    params = []
    P = Parameter(order=p, lower=-1, upper=1, distribution='uniform')
    for i in range(0, n):
        params.append(P)
    #Use the params list to establish the Poly object
    Polybasis = Poly(params, Object)
    V = Polybasis.getPolynomial(eta)
    V = V.T
    return V, Polybasis
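
In both vandermonde variants in this listing, the returned matrix stacks every total-order basis polynomial evaluated at every projected sample. Entrywise, after the transpose,

$$ V_{ij} = \psi_j(\eta_i), \qquad i = 1, \dots, N, \quad j = 1, \dots, \binom{n+p}{p}, $$

where the \psi_j are the total-order basis polynomials, the \eta_i are the affine-transformed projected samples, and \binom{n+p}{p} is the cardinality of a total-order basis of maximum degree p in n variables.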
    def test_additive_mixture_sampling(self):
        """
        test if the method returns a function object for sampling interface
        """
        dimension = 3
        sampling_ratio = 3
        parameters = [Parameter(1, "gaussian")] * dimension
        basis = Basis("total order", [5] * dimension)

        induced_sampling = InducedSampling(parameters, basis, sampling_ratio,
                                           "qr")

        # Mock multi_variate_sampling
        def func(sampled_cdf_values, index_set_used):
            return sampled_cdf_values, index_set_used

        induced_sampling.multi_variate_sampling = func
        _placeholder = np.ones((dimension, 1))
        cdf, index = induced_sampling.additive_mixture_sampling(_placeholder)
        assert type(cdf) == np.ndarray
        assert cdf.shape == (dimension, 1)
        assert cdf.dtype == 'float64'
        assert np.amax(cdf) < 1
        assert np.amin(cdf) > 0
        assert type(index) == np.ndarray
        assert index.shape == (3, )
        index_int = index.astype(int)
        assert np.all(np.isclose(index, index_int, 0.0001))
        # Check Total order
        assert np.sum(index) <= 5
        assert np.amax(index) <= 5
        assert np.amin(index) >= 0
Example no. 9
 def __init__(self, method, full_space_poly=None, sample_points=None, sample_outputs=None, polynomial_degree=2, subspace_dimension=2, bootstrap=False, subspace_init=None, max_iter=1000, tol=None, poly_method='least-squares',solver_args=None):
     self.full_space_poly = full_space_poly
     self.sample_points = sample_points
     self.Y = None # for the zonotope vertices
     if self.sample_points is not None:
         self.sample_points = standardise(sample_points)
     self.sample_outputs = sample_outputs
     self.method = method
     self.subspace_dimension = subspace_dimension
     self.polynomial_degree = polynomial_degree
     self.bootstrap = bootstrap
     self.poly_method = poly_method
     self.solver_args = solver_args
     if self.method.lower() == 'active-subspace' or self.method.lower() == 'active-subspaces':
         self.method = 'active-subspace'
         if self.full_space_poly is None:
             N, d = self.sample_points.shape
             param = Parameter(distribution='uniform', lower=-1, upper=1., order=self.polynomial_degree)
             myparameters = [param for _ in range(d)]
             mybasis = Basis("total-order")
             mypoly = Poly(myparameters, mybasis, method=self.poly_method, sampling_args={'sample-points':self.sample_points, \
                                                                 'sample-outputs':self.sample_outputs},
                                                                 solver_args=self.solver_args)
             mypoly.set_model()
             self.full_space_poly = mypoly
         self.sample_points = standardise(self.full_space_poly.get_points())
         self.sample_outputs = self.full_space_poly.get_model_evaluations()
         self._get_active_subspace()
     elif self.method == 'variable-projection':
         self._get_variable_projection(None,None,tol,max_iter,subspace_init,False)
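
A hedged sketch of driving this constructor directly with input/output data (synthetic ridge function; import path assumed):

import numpy as np
from equadratures import Subspaces  # assumed import path

X = np.random.uniform(-1.0, 1.0, (300, 5))
w = np.ones((5, 1)) / np.sqrt(5)
f = np.exp(np.dot(X, w))                        # exact one-dimensional ridge
subs = Subspaces(method='active-subspace', sample_points=X, sample_outputs=f,
                 subspace_dimension=1, polynomial_degree=2)
W = subs.get_subspace()[:, :1]                  # estimated active direction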
Example no. 10
    def __init__(self, training_input, training_output, num_ridges, max_iters=1, learning_rate = 0.001,
                 W=None, coeffs=None, momentum_rate = .001, opt = 'sd', poly_deg = 2, verbose = False):
        self.training_input = training_input
        self.training_output = training_output
        self.verbose = verbose
        # network architecture params
        if isinstance(num_ridges, int):
            self.num_ridges = [num_ridges]
        else:
            self.num_ridges = num_ridges

        # num_ridges is the number of hidden units at each hidden layer. Does not count the input layer
        self.num_layers = len(self.num_ridges)

        self.dims = training_input.shape[1]
        # initialize network data structures
        max_layer_size = max(self.num_ridges)
        self.poly_array = np.empty(( self.num_layers, max_layer_size), dtype=object)
        #TODO: not hardcode poly type? Have different ridges at different nodes?
        for k in range(self.num_layers):
            for j in range(self.num_ridges[k]):
                self.poly_array[k,j] = Poly(Parameter(poly_deg, distribution='uniform', lower=-3, upper=3), Basis("total order"))
        self.poly_card = self.poly_array[0,0].basis.cardinality

        layer_sizes = [self.dims] + self.num_ridges
        if W is None:
            self.W = [np.random.randn(layer_sizes[k+1], layer_sizes[k]) for k in range(self.num_layers)]
        else:
            self.W = W
        if coeffs is None:
            self.coeffs = [np.random.randn(self.num_ridges[k], self.poly_card) for k in range(self.num_layers)]
        else:
            self.coeffs = coeffs

        self.update_coeffs()
        # Note: We will keep data for every input point in one array.
        n_points = self.training_input.shape[0]
        self.delta = []
        for k in range(self.num_layers):
            self.delta.append(np.zeros((self.num_ridges[k],n_points)))
        self.act_mat = [] # Lambda
        for k in range(self.num_layers):
            self.act_mat.append(np.zeros((self.num_ridges[k], n_points)))
        self.Z = [] # node value before activation
        for k in range(self.num_layers):
            self.Z.append(np.zeros((self.num_ridges[k],n_points)))
        self.Y = [] # After activation
        for k in range(self.num_layers):
            self.Y.append(np.zeros((self.num_ridges[k],n_points)))
        self.phi = [] # basis fn evaluations
        for k in range(self.num_layers):
            self.phi.append(np.zeros((self.num_ridges[k],n_points)))

        self.evaluate_fit(self.training_input,train=True)
        # optimization params
        self.max_iters = max_iters
        self.opt = opt
        self.learning_rate = learning_rate
        self.momentum_rate = momentum_rate
Example no. 11
 def _build_model(self, S, f):
     """
     Constructs quadratic model for ``trust-region`` or ``omorf`` methods
     """
     if self.method == 'trust-region':
         myParameters = [Parameter(distribution='uniform', lower=np.min(S[:,i]), \
                 upper=np.max(S[:,i]), order=2) for i in range(self.n)]
         myBasis = Basis('total-order')
         my_poly = Poly(myParameters, myBasis, method='least-squares', \
                 sampling_args={'sample-points':S, 'sample-outputs':f})
     elif self.method == 'omorf':
         Y = np.dot(S, self.U)
         myParameters = [Parameter(distribution='uniform', lower=np.min(Y[:,i]), \
                 upper=np.max(Y[:,i]), order=2) for i in range(self.d)]
         myBasis = Basis('total-order')
         my_poly = Poly(myParameters, myBasis, method='least-squares', \
                 sampling_args={'sample-points':Y, 'sample-outputs':f})
     my_poly.set_model()
     return my_poly
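
A hedged sketch of how such a quadratic surrogate is used inside a trust-region step: evaluate it cheaply at trial points instead of the expensive objective (the optimiser details here are illustrative assumptions, not part of the snippet above):

import numpy as np
from equadratures import Parameter, Basis, Poly  # assumed import path

S = np.random.uniform(-1.0, 1.0, (15, 2))
f = np.sum(S**2, axis=1).reshape(-1, 1)
params = [Parameter(distribution='uniform', lower=np.min(S[:, i]),
                    upper=np.max(S[:, i]), order=2) for i in range(2)]
model = Poly(params, Basis('total-order'), method='least-squares',
             sampling_args={'sample-points': S, 'sample-outputs': f})
model.set_model()
trials = np.random.uniform(-0.2, 0.2, (100, 2))        # trial points in the region
s_next = trials[np.argmin(model.get_polyfit(trials))]  # lowest model value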
Example no. 12
 def _get_quadrature_points_and_weights(self, order):
     param = Parameter(distribution='uniform',
                       lower=self.lower,
                       upper=self.upper,
                       order=order)
     basis = Basis('univariate')
     poly = Poly(method='numerical-integration',
                 parameters=param,
                 basis=basis)
     points, weights = poly.get_points_and_weights()
     return points, weights * (self.upper - self.lower)
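
Because the weights of a uniform parameter sum to one, the (self.upper - self.lower) rescaling above turns them into quadrature weights for the unweighted integral over [lower, upper]. A hedged standalone check (import path assumed; points has shape (n, 1) as in the other snippets):

import numpy as np
from equadratures import Parameter, Basis, Poly  # assumed import path

lower, upper = 0.0, 2.0
param = Parameter(distribution='uniform', lower=lower, upper=upper, order=10)
poly = Poly(parameters=param, basis=Basis('univariate'),
            method='numerical-integration')
points, weights = poly.get_points_and_weights()
integral = np.sum(weights * (upper - lower) * points[:, 0]**2)
# integral should be close to 8/3, i.e. the integral of x^2 over [0, 2].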
Example no. 13
    def test_induced_sampling(self):
        """
        An integration test for the whole routine
        """
        dimension = 3
        parameters = [Parameter(3, "Uniform", upper=1, lower=-1)]*dimension
        basis = Basis("total-order", [3]*dimension)

        induced_sampling = Induced(parameters, basis)

        quadrature_points = induced_sampling.get_points()
        assert quadrature_points.shape == (induced_sampling.samples_number, 3)
Example no. 14
 def test_generate_sampling_class(self):
     """
     test if the method returns a function object for sampling interface
     """
     parameters = [Parameter(1, "gaussian")] * 3
     basis = Basis("total order")
     generator_class = Sampling(parameters, basis,
                                ('induced-sampling', {
                                    "sampling-ratio": 2,
                                    "subsampling-optimisation": 'qr'
                                }))
     assert generator_class.sampling_class.__class__ == InducedSampling
Example no. 15
		def _fit_poly(X, y):

			N, d = X.shape
			myParameters = []

			for dimension in range(d):
				values = X[:, dimension]
				values_min = np.amin(values)
				values_max = np.amax(values)

				if (values_min - values_max) ** 2 < 0.01:
					myParameters.append(Parameter(distribution='Uniform', lower=values_min-0.01, upper=values_max+0.01, order=self.order))
				else: 
					myParameters.append(Parameter(distribution='Uniform', lower=values_min, upper=values_max, order=self.order))
			myBasis = Basis('total-order')
			
			y = np.reshape(y, (y.shape[0], 1))

			poly = Poly(myParameters, myBasis, method='least-squares', sampling_args={'sample-points':X, 'sample-outputs':y})

			poly.set_model()

			mse = ((y-poly.get_polyfit(X))**2).mean()
			return mse, poly
Example no. 16
    def test_sampling(self):
        d = 4
        order = 5
        param = Parameter(distribution='uniform',
                          order=order,
                          lower=-1.0, upper=1.0)
        myparameters = [param for _ in range(d)]
        mybasis = Basis('total-order')
        mypoly = Poly(myparameters, mybasis,
                      method='least-squares',
                      sampling_args={'mesh': 'induced',
                                     'subsampling-algorithm': 'qr',
                                     'sampling-ratio': 1})

        assert mypoly._quadrature_points.shape == (mypoly.basis.cardinality, d)
Example no. 17
def vandermonde(eta, p):
    _, n = eta.shape
    listing = []
    for i in range(0, n):
        listing.append(p)
    Object = Basis('total-order', listing)
    #Establish n Parameter objects
    params = []
    P = Parameter(order=p, lower=-1, upper=1, distribution='uniform')
    for i in range(0, n):
        params.append(P)
    #Use the params list to establish the Poly object
    Polybasis = Poly(params, Object, method='least-squares')
    V = Polybasis.get_poly(eta)
    V = V.T
    return V, Polybasis
Example no. 18
 def _build_model(self, S, f, del_k):
     """
     Constructs quadratic model for ``trust-region`` method
     """
     myParameters = [
         Parameter(distribution='uniform',
                   lower=S[0, i] - del_k,
                   upper=S[0, i] + del_k,
                   order=2) for i in range(S.shape[1])
     ]
     myBasis = Basis('total-order')
     my_poly = Poly(myParameters,
                    myBasis,
                    method='compressive-sensing',
                    sampling_args={
                        'sample-points': S,
                        'sample-outputs': f
                    })
     my_poly.set_model()
     return my_poly
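
Relative to the least-squares variant earlier in this listing, the only substantive change is method='compressive-sensing', which tolerates fewer samples than basis terms. A hedged standalone sketch (import path assumed; a degree-2 total-order basis in 3 variables has 10 terms, so 8 samples would under-determine a least-squares fit):

import numpy as np
from equadratures import Parameter, Basis, Poly  # assumed import path

S = np.random.uniform(-0.25, 0.25, (8, 3))       # few samples around S[0]
f = np.sum(S**2, axis=1).reshape(-1, 1)
del_k = 0.5                                      # trust-region radius
params = [Parameter(distribution='uniform', lower=S[0, i] - del_k,
                    upper=S[0, i] + del_k, order=2) for i in range(S.shape[1])]
my_poly = Poly(params, Basis('total-order'), method='compressive-sensing',
               sampling_args={'sample-points': S, 'sample-outputs': f})
my_poly.set_model()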
    def test_samples(self):
        """
        test if the method returns a function object for sampling interface
        """
        dimension = 3
        sampling_ratio = 3
        parameters = [Parameter(1, "gaussian")] * dimension
        basis = Basis("total order", [5] * dimension)

        induced_sampling = InducedSampling(parameters, basis, sampling_ratio,
                                           "qr")

        # Mock additive mixture sampling
        def func(array_):
            return np.array([1] * dimension, float)

        induced_sampling.additive_mixture_sampling = func
        quadrature_points = induced_sampling.samples()
        true_array = np.ones((dimension * sampling_ratio, dimension))
        assert_array_equal(quadrature_points, true_array)
Example no. 20
 def test_induced_jacobi_evaluation(self):
     dimension = 3
     parameters = [Parameter(1, "Uniform", upper=1, lower=-1)] * dimension
     basis = Basis("total-order")
     induced_sampling = Induced(parameters, basis)
     parameter = parameters[0]
     parameter.order = 3
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 0, parameter)
     np.testing.assert_allclose(cdf_value, 0.5, atol=0.00001)
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 1, parameter)
     assert cdf_value == 1
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, -1, parameter)
     assert cdf_value == 0
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 0.6, parameter)
     np.testing.assert_allclose(cdf_value, 0.7462, atol=0.00005)
     cdf_value = induced_sampling.induced_jacobi_evaluation(
         0, 0, 0.999, parameter)
     np.testing.assert_allclose(cdf_value, 0.99652, atol=0.000005)
Example no. 21
    def __init__(self,
                 method,
                 full_space_poly=None,
                 sample_points=None,
                 sample_outputs=None,
                 subspace_dimension=2,
                 polynomial_degree=2,
                 param_args=None,
                 poly_args=None,
                 dr_args=None):
        self.full_space_poly = full_space_poly
        self.sample_points = sample_points
        self.Y = None  # for the zonotope vertices
        self.sample_outputs = sample_outputs
        self.method = method
        self.subspace_dimension = subspace_dimension
        self.polynomial_degree = polynomial_degree

        my_poly_args = {'method': 'least-squares', 'solver_args': {}}
        if poly_args is not None:
            my_poly_args.update(poly_args)
        self.poly_args = my_poly_args

        my_param_args = {
            'distribution': 'uniform',
            'order': self.polynomial_degree,
            'lower': -1,
            'upper': 1
        }
        if param_args is not None:
            my_param_args.update(param_args)

        # I suppose we can detect whether lower and upper are present to decide between these categories?
        bounded_distrs = [
            'analytical', 'beta', 'chebyshev', 'arcsine', 'truncated-gaussian',
            'uniform'
        ]
        unbounded_distrs = [
            'gaussian', 'normal', 'gumbel', 'logistic', 'students-t',
            'studentst'
        ]
        semi_bounded_distrs = [
            'chi', 'chi-squared', 'exponential', 'gamma', 'lognormal',
            'log-normal', 'pareto', 'rayleigh', 'weibull'
        ]

        if dr_args is not None:
            if 'standardize' in dr_args:
                dr_args['standardise'] = dr_args['standardize']

        if self.method.lower() == 'active-subspace' or self.method.lower(
        ) == 'active-subspaces':
            self.method = 'active-subspace'
            if dr_args is not None:
                self.standardise = dr_args.get('standardise', True)  # dr_args is a dict
            else:
                self.standardise = True

            if self.full_space_poly is None:
                # user provided input/output data
                N, d = self.sample_points.shape
                if self.standardise:
                    self.data_scaler = scaler_minmax()
                    self.data_scaler.fit(self.sample_points)
                    self.std_sample_points = self.data_scaler.transform(
                        self.sample_points)
                else:
                    self.std_sample_points = self.sample_points.copy()
                param = Parameter(**my_param_args)
                if param_args is not None:
                    if (('lower' in param_args)
                            or ('upper' in param_args)) and self.standardise:
                        warnings.warn(
                            'Points standardised but parameter range provided. Overriding default ([-1,1])...',
                            UserWarning)
                myparameters = [param for _ in range(d)]
                mybasis = Basis("total-order")
                mypoly = Poly(myparameters,
                              mybasis,
                              sampling_args={
                                  'sample-points': self.std_sample_points,
                                  'sample-outputs': self.sample_outputs
                              },
                              **my_poly_args)
                mypoly.set_model()
                self.full_space_poly = mypoly
            else:
                # User provided polynomial
                # Standardise according to distribution specified. Only care about the scaling (not shift)
                # TODO: user provided callable with parameters?
                user_params = self.full_space_poly.parameters
                d = len(user_params)
                self.sample_points = self.full_space_poly.get_points()
                if self.standardise:
                    scale_factors = np.zeros(d)
                    centers = np.zeros(d)
                    for dd, p in enumerate(user_params):
                        if p.name.lower() in bounded_distrs:
                            scale_factors[dd] = (p.upper - p.lower) / 2.0
                            centers[dd] = (p.upper + p.lower) / 2.0
                        elif p.name.lower() in unbounded_distrs:
                            scale_factors[dd] = np.sqrt(p.variance)
                            centers[dd] = p.mean
                        else:
                            scale_factors[dd] = np.sqrt(p.variance)
                            centers[dd] = 0.0
                    self.param_scaler = scaler_custom(centers, scale_factors)
                    self.std_sample_points = self.param_scaler.transform(
                        self.sample_points)
                else:
                    self.std_sample_points = self.sample_points.copy()
                if not hasattr(self.full_space_poly, 'coefficients'):
                    raise ValueError('Please call set_model() first on poly.')

            self.sample_outputs = self.full_space_poly.get_model_evaluations()
            # TODO: use dr_args for resampling of gradient points
            as_args = {'grad_points': None}
            if dr_args is not None:
                as_args.update(dr_args)
            self._get_active_subspace(**as_args)
        elif self.method == 'variable-projection':
            self.data_scaler = scaler_minmax()
            self.data_scaler.fit(self.sample_points)
            self.std_sample_points = self.data_scaler.transform(
                self.sample_points)

            if dr_args is not None:
                vp_args = {
                    'gamma': 0.1,
                    'beta': 1e-4,
                    'tol': 1e-7,
                    'maxiter': 1000,
                    'U0': None,
                    'verbose': False
                }
                vp_args.update(dr_args)
                self._get_variable_projection(**vp_args)
            else:
                self._get_variable_projection()
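
A hedged sketch of calling this richer constructor with raw data and keyword dictionaries; the dr_args keys are taken from the defaults visible above (import path assumed):

import numpy as np
from equadratures import Subspaces  # assumed import path

X = np.random.uniform(-1.0, 1.0, (400, 4))
f = np.dot(X, np.array([[1.0], [0.5], [0.0], [0.0]]))**2
subs = Subspaces('variable-projection', sample_points=X, sample_outputs=f,
                 subspace_dimension=1, polynomial_degree=2,
                 dr_args={'maxiter': 500, 'tol': 1e-7, 'verbose': False})
W = subs.get_subspace()[:, :1]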
Example no. 22
class Nataf(object):
    """
    The class defines a Nataf transformation.
    References for theory:
        Melchers, R. E. (1999) Structural Reliability Analysis and Prediction,
        second edition. John Wiley & Sons Ltd.

    The input correlated marginals are mapped from their physical space to a new
    standard normal space, in which points are uncorrelated.

    Attributes of the class:
    :param list D:
            List of parameters (distributions), interpreted here as the marginals.
    :param numpy-matrix R:
            The correlation matrix associated with the joint distribution.
    :param object std:
            A standard normal distribution
    :param numpy-matrix A:
            The Cholesky decomposition of Fictive matrix R0,
            associated with the set of normal intermediate
            correlated distributions.
    """
    def __init__(self, D=None, R=None):
        if D is None:
            raise ValueError('Distributions must be given')
        else:
            self.D = D

        if R is None:
            raise ValueError('Correlation matrix must be specified')
        else:
            self.R = R

        self.std = Parameter(order=5, distribution='normal',shape_parameter_A = 0.0, shape_parameter_B = 1.0)
        #
        #    R0 = fictive matrix of correlated normal intermediate variables
        #
        #    1) Check the type of correlated marginals
        #    2) Use Effective Quadrature for solving Legendre
        #    3) Calculate the fictive matrix

        inf_lim = -8.0
        sup_lim = - inf_lim
        p1 = Parameter(distribution = 'uniform', lower = inf_lim, upper = sup_lim, order = 31)
        myBasis = Basis('Tensor grid')
        Pols = Polyint([p1, p1], myBasis)
        p = Pols.quadraturePoints
        w = Pols.quadratureWeights * (sup_lim - inf_lim)**2

        p1 = p[:,0]
        p2 = p[:,1]

        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i+1, len(self.D), 1):
                if self.R[i,j] == 0:
                    R0[i,j] = 0.0
                else:
                  tp11 = -(np.array(self.D[i].getiCDF(self.std.getCDF(points=p1))) - self.D[i].mean ) / np.sqrt( self.D[i].variance )
                  tp22 = -(np.array(self.D[j].getiCDF(self.std.getCDF(points=p2))) -  self.D[j].mean)/np.sqrt( self.D[j].variance )

                  rho_ij = self.R[i,j]
                  bivariateNormalPDF = (1.0 / (2.0 * np.pi * np.sqrt(1.0-rho_ij**2)) * np.exp(-1.0/(2.0*(1.0 - rho_ij**2)) * (p1**2 - 2.0 * rho_ij * p1 * p2  + p2**2 )))
                  coefficientsIntegral = np.flipud(tp11*tp22 * w)

                  def check_difference(rho_ij):
                      bivariateNormalPDF = (1.0 / (2.0 * np.pi * np.sqrt(1.0-rho_ij**2)) * np.exp(-1.0/(2.0*(1.0 - rho_ij**2)) * (p1**2 - 2.0 * rho_ij * p1 * p2  + p2**2 )))
                      diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                      return diff - self.R[i,j]

                  # Newton is used only when neither marginal is custom;
                  # otherwise fall back to bounded least squares.
                  if (self.D[i].name!='custom') and (self.D[j].name!='custom'):
                    rho = optimize.newton(check_difference, self.R[i,j], maxiter=50)
                  else:
                    res = optimize.least_squares(check_difference, self.R[i,j], bounds=(-0.999,0.999), ftol=1.e-03)
                    rho = res.x
                    print('A Custom Marginal is present')

                  R0[i,j] = rho
                  R0[j,i] = R0[i,j]

        self.A = np.linalg.cholesky(R0)
        print('The Cholesky decomposition of fictive matrix R0 is:')
        print(self.A)
        print('The fictive matrix is:')
        print(R0)

    def C2U(self, X):
        """  Method for mapping correlated variables to a new standard space.
             The input matrix must have [N x m] dimensions, where m is the number
             of correlated marginals.

             :param numpy-matrix X:
                    A N-by-M Matrix where input marginals are organized along columns
                    M represents the number of correlated marginals
             :return:
                    A N-by-M Matrix which contains standardized uncorrelated data.
                    The transformation of each i-th input marginal is stored along
                    the i-th column of the output matrix.
        """
        c = X[:,0]
        w1 = np.zeros((len(c),len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(c)):
                w1[j,i] = self.D[i].getCDF(points=X[j,i])
                if (w1[j,i] >= 1.0):
                    w1[j,i] = 1.0 - 10**(-10)
                elif (w1[j,i] <= 0.0):
                    w1[j,i] = 0.0 + 10**(-10)

        #-----------------------------------------------#
        #plt.figure()
        #plt.grid(linewidth=0.5, color='k')
        #plt.plot(X[:,0], w1[:,0], 'ro', label='first')
        #plt.plot(X[:,1], w1[:,1], 'bx', label='second')
        #plt.title('from nataf class: w1 VS X input')
        #plt.legend(loc='upper left')
        #plt.show()
        #-----------------------------------------------#
        sU = np.zeros((len(c),len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(c)):
                sU[j,i] = self.std.getiCDF(w1[j,i])

        sU = np.array(sU)
        sU = sU.T

        xu = np.linalg.solve(self.A,sU)
        xu = np.array(xu)
        xu = xu.T

        return xu

    def U2C(self, X):
        """ Method for mapping uncorrelated variables from standard normal space
            to a new physical space in which variables are correlated.
            Input matrix must have [m x N] dimensions, where m is the number of input marginals.

            :param numpy-matrix X:
                    A Matrix of M-by-N dimensions, in which uncorrelated marginals
                    are organized along rows.
            :return:
                    A N-by-M matrix in which the result of the inverse transformation
                    applied to the i-th marginal is stored along the i-th column
                    of the output matrix.
        """
        X = X.T

        invA = np.linalg.inv(self.A)
        Z = np.linalg.solve(invA, X)
        Z = Z.T

        xc = np.zeros((len(Z[:,0]), len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(Z[:,0])):
                xc[j,i] = self.std.getCDF(points=Z[j,i])
        Xc = np.zeros((len(Z[:,0]),len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(Z[:,0])):
                temporary = np.matrix(xc[j,i])
                temp = self.D[i].getiCDF(temporary)

                t = temp[0]
                Xc[j,i] = t
        return Xc

    def getUncorrelatedSamples(self, N=None):
        """ Method for sampling uncorrelated data:

            :param integer N:
                    The number of samples to draw.
            :return:
                    A N-by-M matrix, each i-th column contains the points
                    which belong to the i-th distribution stored into list D.
        """
        if N is not None:
            distro = list()
            for i in range(len(self.D)):
                    distro1 = self.D[i].getSamples(N)

                    # check dimensions ------------------#
                    distro1 = np.matrix(distro1)
                    dimension = np.shape(distro1)
                    if dimension[0] == N:
                        distro1 = distro1.T
                    #------------------------------------#
                    distro.append(distro1)

            distro = np.reshape(distro, (len(self.D),N))
            distro = distro.T

        else:
             raise ValueError('One input must be given to the "getUncorrelatedSamples" method')
        return distro

    def getCorrelatedSamples(self, N=None):
        """ Method for sampling correlated data:

            :param integer N:
                The number of correlated samples to draw.

            :return:
                A N-by-M matrix in which correlated samples are organized
                along columns: the correlated samples for the i-th marginal
                are stored along the i-th column of the output matrix.
        """
        if N is not None:

            distro = list()
            for i in range(len(self.D)):
                    distro1 = self.std.getSamples(N)

                    # check dimensions ------------------#
                    distro1 = np.matrix(distro1)
                    dimension = np.shape(distro1)
                    if dimension[0] == N:
                        distro1 = distro1.T
                    #------------------------------------#
                    distro.append(distro1)

            distro = np.reshape(distro, (len(self.D),N))
            interm = np.dot(self.A, distro)
            correlated = np.zeros((len(self.D),N))
            for i in range(len(self.D)):
                for j in range(N):
                    correlated[i,j] = self.D[i].getiCDF(self.std.getCDF(interm[i,j]))
            correlated = correlated.T
            return correlated

        else:
             raise ValueError('One input must be given to the "getCorrelatedSamples" method: please choose between sampling N points or giving an array of uncorrelated data')

    def CorrelationMatrix(self, X):
        """ The following calculations check the correlation
            matrix of input arrays and determine the covariance
            matrix: the input matrix must have [N x m] dimensions, where
            m is the number of marginals.

            :param X:
                Matrix of correlated data
            :param D:
                diagonal matrix which contains the variances
            :param S:
                covariance matrix
            :return:
                A correlation matrix R
        """
        N = len(X)
        D = np.zeros((len(self.D),len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(self.D)):
                if i==j:
                    D[i,j] = np.sqrt(self.D[i].variance)
                else:
                    D[i,j] = 0
        diff1 = np.zeros((N, len(self.D))) # (x_j - mu_j)
        diff2 = np.zeros((N, len(self.D))) # (x_k - mu_k)
        prod_n = np.zeros(N)
        prod_square1 = np.zeros(N)
        prod_square2 = np.zeros(N)

        R = np.zeros((len(self.D),len(self.D)))
        for j in range(len(self.D)):
            for k in range(len(self.D)):
                if j==k:
                    R[j,k] = 1.0
                else:
                    for i in range(N):
                        diff1[i,j] = (X[i,j] - self.D[j].mean)
                        diff2[i,k] = (X[i,k] - self.D[k].mean)
                        prod_n[i]  = 1.0*(diff1[i,j]*diff2[i,k])
                        prod_square1[i] = (diff1[i,j])**2
                        prod_square2[i] = (diff2[i,k])**2

                    den1   = np.sum(prod_square1)
                    den2   = np.sum(prod_square2)
                    den11  = np.sqrt(den1)
                    den22  = np.sqrt(den2)
                    R[j,k] = np.sum(prod_n)/(den11*den22)

        return R
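
For reference, the root-finding loop in __init__ solves, for each pair of marginals, the classical Nataf integral equation for the fictive Gaussian correlation:

$$ R_{ij} = \int_{-\infty}^{\infty} \int_{-\infty}^{\infty} \frac{F_i^{-1}(\Phi(u)) - \mu_i}{\sigma_i} \, \frac{F_j^{-1}(\Phi(v)) - \mu_j}{\sigma_j} \, \phi_2(u, v; \rho_{0,ij}) \, \mathrm{d}u \, \mathrm{d}v, $$

where \Phi is the standard normal CDF, F_i^{-1} is the inverse CDF of the i-th marginal, and \phi_2 is the bivariate normal density. The tensor-grid quadrature over [-8, 8]^2 approximates the double integral, Newton's method solves for \rho_{0,ij}, and the resulting fictive matrix R0 is Cholesky-factorised into self.A.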
Example no. 23
    def __init__(self, D=None, R=None):
        if D is None:
            raise ValueError('Distributions must be given')
        else:
            self.D = D

        if R is None:
            raise ValueError('Correlation matrix must be specified')
        else:
            self.R = R

        self.std = Parameter(order=5, distribution='normal',shape_parameter_A = 0.0, shape_parameter_B = 1.0)
        #
        #    R0 = fictive matrix of correlated normal intermediate variables
        #
        #    1) Check the type of correlated marginals
        #    2) Use Effective Quadrature for solving Legendre
        #    3) Calculate the fictive matrix

        inf_lim = -8.0
        sup_lim = - inf_lim
        p1 = Parameter(distribution = 'uniform', lower = inf_lim, upper = sup_lim, order = 31)
        myBasis = Basis('Tensor grid')
        Pols = Polyint([p1, p1], myBasis)
        p = Pols.quadraturePoints
        w = Pols.quadratureWeights * (sup_lim - inf_lim)**2

        p1 = p[:,0]
        p2 = p[:,1]

        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i+1, len(self.D), 1):
                if self.R[i,j] == 0:
                    R0[i,j] = 0.0
                else:
                  tp11 = -(np.array(self.D[i].getiCDF(self.std.getCDF(points=p1))) - self.D[i].mean ) / np.sqrt( self.D[i].variance )
                  tp22 = -(np.array(self.D[j].getiCDF(self.std.getCDF(points=p2))) -  self.D[j].mean)/np.sqrt( self.D[j].variance )

                  rho_ij = self.R[i,j]
                  bivariateNormalPDF = (1.0 / (2.0 * np.pi * np.sqrt(1.0-rho_ij**2)) * np.exp(-1.0/(2.0*(1.0 - rho_ij**2)) * (p1**2 - 2.0 * rho_ij * p1 * p2  + p2**2 )))
                  coefficientsIntegral = np.flipud(tp11*tp22 * w)

                  def check_difference(rho_ij):
                      bivariateNormalPDF = (1.0 / (2.0 * np.pi * np.sqrt(1.0-rho_ij**2)) * np.exp(-1.0/(2.0*(1.0 - rho_ij**2)) * (p1**2 - 2.0 * rho_ij * p1 * p2  + p2**2 )))
                      diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                      return diff - self.R[i,j]

                  # Newton is used only when neither marginal is custom;
                  # otherwise fall back to bounded least squares.
                  if (self.D[i].name!='custom') and (self.D[j].name!='custom'):
                    rho = optimize.newton(check_difference, self.R[i,j], maxiter=50)
                  else:
                    res = optimize.least_squares(check_difference, self.R[i,j], bounds=(-0.999,0.999), ftol=1.e-03)
                    rho = res.x
                    print('A Custom Marginal is present')

                  R0[i,j] = rho
                  R0[j,i] = R0[i,j]

        self.A = np.linalg.cholesky(R0)
        print('The Cholesky decomposition of fictive matrix R0 is:')
        print(self.A)
        print('The fictive matrix is:')
        print(R0)
Example no. 24
    def __init__(self, poly, correlation_matrix, verbose=False):
        self.poly = poly
        D = self.poly.get_parameters()
        self.D = D
        self.R = correlation_matrix
        self.std = Parameter(order=5,
                             distribution='normal',
                             shape_parameter_A=0.0,
                             shape_parameter_B=1.0)
        inf_lim = -8.0
        sup_lim = -inf_lim
        p1 = Parameter(distribution='uniform',
                       lower=inf_lim,
                       upper=sup_lim,
                       order=31)
        myBasis = Basis('tensor-grid')
        Pols = Poly([p1, p1], myBasis, method='numerical-integration')
        p = Pols.get_points()
        w = Pols.get_weights() * (sup_lim - inf_lim)**2
        p1 = p[:, 0]
        p2 = p[:, 1]
        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i + 1, len(self.D), 1):
                if self.R[i, j] == 0:
                    R0[i, j] = 0.0
                else:
                    tp11 = -(np.array(self.D[i].get_icdf(
                        self.std.get_cdf(points=p1))) -
                             self.D[i].mean) / np.sqrt(self.D[i].variance)
                    tp22 = -(np.array(self.D[j].get_icdf(
                        self.std.get_cdf(points=p2))) -
                             self.D[j].mean) / np.sqrt(self.D[j].variance)

                    rho_ij = self.R[i, j]
                    bivariateNormalPDF = (
                        1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                        np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                               (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                    coefficientsIntegral = np.flipud(tp11 * tp22 * w)

                    def check_difference(rho_ij):
                        bivariateNormalPDF = (
                            1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                            np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                                   (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                        diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                        return diff - self.R[i, j]

                    # Newton is used only when neither marginal is custom;
                    # otherwise fall back to bounded least squares.
                    if (self.D[i].name != 'custom') and (self.D[j].name !=
                                                         'custom'):
                        rho = optimize.newton(check_difference,
                                              self.R[i, j],
                                              maxiter=50)
                    else:
                        res = optimize.least_squares(check_difference,
                                                     self.R[i, j],
                                                     bounds=(-0.999, 0.999),
                                                     ftol=1.e-03)
                        rho = res.x
                        print('A Custom Marginal is present')

                    R0[i, j] = rho
                    R0[j, i] = R0[i, j]

        self.A = np.linalg.cholesky(R0)
        if verbose is True:
            print('The Cholesky decomposition of fictive matrix R0 is:')
            print(self.A)
            print('The fictive matrix is:')
            print(R0)
        list_of_parameters = []
        for i in range(0, len(self.D)):
            standard_parameter = Parameter(order=self.D[i].order,
                                           distribution='gaussian',
                                           shape_parameter_A=0.,
                                           shape_parameter_B=1.)
            list_of_parameters.append(standard_parameter)
        self.polystandard = deepcopy(self.poly)
        self.polystandard._set_parameters(list_of_parameters)
        self.standard_samples = self.polystandard.get_points()
        self._points = self.get_correlated_from_uncorrelated(
            self.standard_samples)
Example no. 25
class Correlations(object):
    """
    The class defines a Nataf transformation. The input correlated marginals are mapped from their physical space to a new
    standard normal space, in which points are uncorrelated.

    :param Poly poly: A polynomial object.
    :param numpy.ndarray correlation_matrix: The correlation matrix associated with the joint distribution.

    **References**
        1. Melchers, R. E., (1999) Structural Reliability Analysis and Prediction. John Wiley and Sons, second edition.

    """
    def __init__(self, poly, correlation_matrix, verbose=False):
        self.poly = poly
        D = self.poly.get_parameters()
        self.D = D
        self.R = correlation_matrix
        self.std = Parameter(order=5,
                             distribution='normal',
                             shape_parameter_A=0.0,
                             shape_parameter_B=1.0)
        inf_lim = -8.0
        sup_lim = -inf_lim
        p1 = Parameter(distribution='uniform',
                       lower=inf_lim,
                       upper=sup_lim,
                       order=31)
        myBasis = Basis('tensor-grid')
        Pols = Poly([p1, p1], myBasis, method='numerical-integration')
        p = Pols.get_points()
        w = Pols.get_weights() * (sup_lim - inf_lim)**2
        p1 = p[:, 0]
        p2 = p[:, 1]
        R0 = np.eye((len(self.D)))
        for i in range(len(self.D)):
            for j in range(i + 1, len(self.D), 1):
                if self.R[i, j] == 0:
                    R0[i, j] = 0.0
                else:
                    tp11 = -(np.array(self.D[i].get_icdf(
                        self.std.get_cdf(points=p1))) -
                             self.D[i].mean) / np.sqrt(self.D[i].variance)
                    tp22 = -(np.array(self.D[j].get_icdf(
                        self.std.get_cdf(points=p2))) -
                             self.D[j].mean) / np.sqrt(self.D[j].variance)

                    rho_ij = self.R[i, j]
                    bivariateNormalPDF = (
                        1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                        np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                               (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                    coefficientsIntegral = np.flipud(tp11 * tp22 * w)

                    def check_difference(rho_ij):
                        bivariateNormalPDF = (
                            1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                            np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                                   (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                        diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                        return diff - self.R[i, j]

                    # Newton is used only when neither marginal is custom;
                    # otherwise fall back to bounded least squares.
                    if (self.D[i].name != 'custom') and (self.D[j].name !=
                                                         'custom'):
                        rho = optimize.newton(check_difference,
                                              self.R[i, j],
                                              maxiter=50)
                    else:
                        res = optimize.least_squares(check_difference,
                                                     self.R[i, j],
                                                     bounds=(-0.999, 0.999),
                                                     ftol=1.e-03)
                        rho = res.x
                        print('A Custom Marginal is present')

                    R0[i, j] = rho
                    R0[j, i] = R0[i, j]

        self.A = np.linalg.cholesky(R0)
        if verbose is True:
            print('The Cholesky decomposition of fictive matrix R0 is:')
            print(self.A)
            print('The fictive matrix is:')
            print(R0)
        list_of_parameters = []
        for i in range(0, len(self.D)):
            standard_parameter = Parameter(order=self.D[i].order,
                                           distribution='gaussian',
                                           shape_parameter_A=0.,
                                           shape_parameter_B=1.)
            list_of_parameters.append(standard_parameter)
        self.polystandard = deepcopy(self.poly)
        self.polystandard._set_parameters(list_of_parameters)
        self.standard_samples = self.polystandard.get_points()
        self._points = self.get_correlated_from_uncorrelated(
            self.standard_samples)

    def get_points(self):
        """
        Returns the correlated samples based on the quadrature rules used in poly.

        :param Correlations self: An instance of the Correlations object.

        :return:
            **points**: A numpy.ndarray of sampled quadrature points with shape (number_of_samples, dimension).

        """
        return self._points

    def set_model(self, model=None, model_grads=None):
        """
        Computes the coefficients of the polynomial.

        :param Correlations self:
            An instance of the Correlations class.
        :param callable model:
            The function that needs to be approximated. In the absence of a callable function, the input can be the function evaluated at the quadrature points.
        :param callable model_grads:
            The gradient of the function that needs to be approximated. In the absence of a callable gradient function, the input can be a matrix of gradient evaluations at the quadrature points.
        """
        model_values = None
        model_grads_values = None
        if callable(model):
            model_values = evaluate_model(self._points, model)
        else:
            model_values = model
        if model_grads is not None:
            if callable(model_grads):
                model_grads_values = evaluate_model_gradients(
                    self._points, model_grads)
            else:
                model_grads_values = model_grads
        self.polystandard.set_model(model_values, model_grads_values)

    def get_transformed_poly(self):
        """
        Returns the transformed polynomial.

        :param Correlations self:
            An instance of the Correlations class.

        :return:
            **poly**: An instance of the Poly class.
        """
        return self.polystandard

    def get_correlated_from_uncorrelated(self, X):
        """
        Method for mapping uncorrelated variables from standard normal space to a new physical space in which variables are correlated.

        :param Correlations self: An instance of the Correlations object.
        :param numpy.ndarray X: Samples of uncorrelated points from the marginals; of shape (N,M)

        :return:
            **C**: A numpy.ndarray of shape (N, M), which contains the correlated samples.
        """
        X = X.T

        invA = np.linalg.inv(self.A)
        Z = np.linalg.solve(invA, X)
        Z = Z.T

        xc = np.zeros((len(Z[:, 0]), len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(Z[:, 0])):
                xc[j, i] = self.std.get_cdf(points=Z[j, i])
        Xc = np.zeros((len(Z[:, 0]), len(self.D)))
        for i in range(len(self.D)):
            for j in range(len(Z[:, 0])):
                temporary = np.matrix(xc[j, i])
                temp = self.D[i].get_icdf(temporary)

                t = temp[0]
                Xc[j, i] = t
        return Xc

    def get_correlated_samples(self, N=None):
        """
        Method for generating correlated samples.

        :param int N: Number of correlated samples required.

        :return:
            **C**: A numpy.ndarray of shape (N, M), which contains the correlated samples.
        """
        if N is not None:

            distro = list()
            for i in range(len(self.D)):
                distro1 = self.std.get_samples(N)

                # check dimensions ------------------#
                distro1 = np.matrix(distro1)
                dimension = np.shape(distro1)
                if dimension[0] == N:
                    distro1 = distro1.T
                #------------------------------------#
                distro.append(distro1)

            distro = np.reshape(distro, (len(self.D), N))
            interm = np.dot(self.A, distro)
            correlated = np.zeros((len(self.D), N))
            for i in range(len(self.D)):
                for j in range(N):
                    correlated[i, j] = self.D[i].get_icdf(
                        self.std.get_cdf(interm[i, j]))
            correlated = correlated.T
            return correlated

        else:
            raise ValueError(
                'One input must be given to the get_correlated_samples method: '
                'please choose between sampling N points or giving an array of uncorrelated data.')
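
A hedged end-to-end sketch of the class above: define Gaussian marginals on a tensor grid, impose a correlation matrix, and fit a model through the correlated points. The constructor and method names are those shown here; the import path and the assumption that set_model's callable is evaluated one point at a time are mine:

import numpy as np
from equadratures import Parameter, Basis, Poly, Correlations  # assumed import path

params = [Parameter(order=3, distribution='gaussian',
                    shape_parameter_A=0.0, shape_parameter_B=1.0)
          for _ in range(2)]
poly = Poly(params, Basis('tensor-grid'), method='numerical-integration')
R = np.array([[1.0, 0.6],
              [0.6, 1.0]])
corr = Correlations(poly, R)
pts = corr.get_points()                     # correlated quadrature points
corr.set_model(lambda x: x[0] + x[1])       # assumed per-point evaluation
fitted = corr.get_transformed_poly()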
Example no. 26
class Correlations(object):
    """
    The class defines methods for polynomial approximations with correlated inputs, including the Nataf transform and Gram-Schmidt process.

    :param numpy.ndarray correlation_matrix: The correlation matrix associated with the joint distribution.
    :param Poly poly: Polynomial defined with parameters with marginal distributions in uncorrelated space.
    :param list parameters: List of parameters with marginal distributions.
    :param str method: `nataf-transform` or `gram-schmidt`.
    :param bool verbose: Display Cholesky decomposition of the fictive matrix.

    **References**
        1. Melchers, R. E., (1999) Structural Reliability Analysis and Prediction. John Wiley and Sons, second edition.
        2. Jakeman, J. D. et al., (2019) Polynomial chaos expansions for dependent random variables.
    """
    def __init__(self,
                 correlation_matrix,
                 poly=None,
                 parameters=None,
                 method=None,
                 verbose=False):
        if (poly is None) and (method is not None):
            raise ValueError('Need to specify poly for probability transform.')
        if poly is not None:
            self.poly = poly
            D = self.poly.get_parameters()
        elif parameters is not None:
            D = parameters
        else:
            raise ValueError('Need to specify either poly or parameters.')
        self.D = D
        self.R = correlation_matrix
        self.std = Parameter(order=5,
                             distribution='normal',
                             shape_parameter_A=0.0,
                             shape_parameter_B=1.0)
        inf_lim = -8.0
        sup_lim = -inf_lim
        p1 = Parameter(distribution='uniform',
                       lower=inf_lim,
                       upper=sup_lim,
                       order=31)
        myBasis = Basis('tensor-grid')
        self.Pols = Poly([p1, p1], myBasis, method='numerical-integration')
        Pols = self.Pols
        p = Pols.get_points()
        # Rescale the normalised quadrature weights by the domain area.
        w = Pols.get_weights() * (sup_lim - inf_lim)**2
        p1 = p[:, 0]
        p2 = p[:, 1]
        R0 = np.eye(len(self.D))
        for i in range(len(self.D)):
            for j in range(i + 1, len(self.D), 1):
                if self.R[i, j] == 0:
                    R0[i, j] = 0.0
                else:
                    z1 = np.array(self.D[i].get_icdf(
                        self.std.get_cdf(points=p1)))
                    z2 = np.array(self.D[j].get_icdf(
                        self.std.get_cdf(points=p2)))

                    tp11 = (z1 - self.D[i].mean) / np.sqrt(self.D[i].variance)
                    tp22 = (z2 - self.D[j].mean) / np.sqrt(self.D[j].variance)

                    coefficientsIntegral = np.flipud(tp11 * tp22 * w)

                    def check_difference(rho_ij):
                        bivariateNormalPDF = (
                            1.0 / (2.0 * np.pi * np.sqrt(1.0 - rho_ij**2)) *
                            np.exp(-1.0 / (2.0 * (1.0 - rho_ij**2)) *
                                   (p1**2 - 2.0 * rho_ij * p1 * p2 + p2**2)))
                        diff = np.dot(coefficientsIntegral, bivariateNormalPDF)
                        return diff - self.R[i, j]

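                    # Nataf construction: find the "fictive" Gaussian
                    # correlation rho0 such that, when (Z1, Z2) are standard
                    # bivariate normal with correlation rho0, the standardised
                    # pair (t_i(Z1), t_j(Z2)) attains the target correlation
                    # R[i, j]. check_difference is the residual of this
                    # integral equation, evaluated with the tensor-grid
                    # quadrature rule built above.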
                    # Note: custom marginals may require a bounded solver
                    # (e.g. least squares on [-0.999, 0.999]) rather than
                    # Newton's method.
                    rho = optimize.newton(check_difference,
                                          self.R[i, j],
                                          maxiter=50)

                    R0[i, j] = rho
                    R0[j, i] = R0[i, j]
        self.R0 = R0.copy()

        self.A = np.linalg.cholesky(R0)
        if verbose:
            print('The Cholesky decomposition of fictive matrix R0 is:')
            print(self.A)
            print('The fictive matrix is:')
            print(R0)

        if method is None:
            pass
        elif method.lower() == 'nataf-transform':
            list_of_parameters = []
            for i in range(0, len(self.D)):
                standard_parameter = Parameter(order=self.D[i].order,
                                               distribution='gaussian',
                                               shape_parameter_A=0.,
                                               shape_parameter_B=1.)
                list_of_parameters.append(standard_parameter)

            self.corrected_poly = deepcopy(self.poly)

            if hasattr(self.corrected_poly, '_quadrature_points'):
                self.corrected_poly._set_parameters(list_of_parameters)
                self.standard_samples = self.corrected_poly._quadrature_points
                self._points = self.get_correlated_samples(
                    X=self.standard_samples)
                # self.corrected_poly._quadrature_points = self._points.copy()
        elif method.lower() == 'gram-schmidt':
            basis_card = poly.basis.cardinality
            oversampling = 10

            N_Psi = oversampling * basis_card
            S_samples = self.get_correlated_samples(N=N_Psi)
            w_weights = 1.0 / N_Psi * np.ones(N_Psi)
            Psi = poly.get_poly(S_samples).T
            WPsi = np.diag(np.sqrt(w_weights)) @ Psi
            self.WPsi = WPsi

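            # QR-factorise the weighted Vandermonde matrix; the inverse of the
            # R factor defines the change of basis that orthonormalises the
            # polynomials with respect to the empirical correlated measure
            # (the Gram-Schmidt construction of Jakeman et al., 2019).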
            R_Psi = np.linalg.qr(WPsi)[1]

            self.R_Psi = R_Psi
            self.R_Psi[0, :] *= np.sign(self.R_Psi[0, 0])
            self.corrected_poly = deepcopy(poly)
            self.corrected_poly.inv_R_Psi = np.linalg.inv(self.R_Psi)
            self.corrected_poly.corr = self
            self.corrected_poly._set_points_and_weights()

            P = self.corrected_poly.get_poly(
                self.corrected_poly._quadrature_points)
            W = np.diag(np.sqrt(self.corrected_poly._quadrature_weights))
            A = W @ P.T
            self.corrected_poly.A = A
            self.corrected_poly.P = P

            if hasattr(self.corrected_poly, '_quadrature_points'):
                # TODO: Correlated quadrature points?
                self._points = self.corrected_poly._quadrature_points
        else:
            raise ValueError('Invalid method for correlations.')

    def get_points(self):
        """
        Returns the correlated samples based on the quadrature rules used in poly.

        :param Correlations self: An instance of the Correlations object.

        :return:
            **points**: A numpy.ndarray of sampled quadrature points with shape (number_of_samples, dimension).

        """
        return self._points

    def set_model(self, model=None, model_grads=None):
        """
        Computes the coefficients of the polynomial.

        :param Correlations self:
            An instance of the Correlations class.
        :param callable model:
            The function that needs to be approximated. In the absence of a callable function, the input can be the function evaluated at the quadrature points.
        :param callable model_grads:
            The gradient of the function that needs to be approximated. In the absence of a callable gradient function, the input can be a matrix of gradient evaluations at the quadrature points.
        """
        # Need to account for the nataf transform here?
        model_values = None
        model_grads_values = None
        if callable(model):
            model_values = evaluate_model(self._points, model)
        else:
            model_values = model
        if model_grads is not None:
            if callable(model_grads):
                model_grads_values = evaluate_model_gradients(
                    self._points, model_grads)
            else:
                model_grads_values = model_grads
        self.corrected_poly.set_model(model_values, model_grads_values)

    def get_transformed_poly(self):
        """
        Returns the transformed polynomial.

        :param Correlations self:
            An instance of the Correlations class.

        :return:
            **poly**: An instance of the Poly class.
        """
        return self.corrected_poly

    def get_correlated_samples(self, N=None, X=None):
        """
        Method for generating correlated samples.

        :param int N: Number of correlated samples required.
        :param numpy.ndarray X: (Optional) Points in the uncorrelated standard-normal space to map to the correlated space.

        :return:
            **C**: A numpy.ndarray of shape (N, M), which contains the correlated samples.
        """
        d = len(self.D)

        if X is None:
            if N is None:
                raise ValueError(
                    'Need to specify number of points to generate.')
            X = np.random.multivariate_normal(np.zeros(d), np.eye(d), size=N)

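        # Rows of X are i.i.d. standard normal, so X @ A.T has correlation
        # matrix A A^T = R0, the fictive Gaussian correlation.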
        X_test = X @ self.A.T

        U_test = np.zeros(X_test.shape)
        for i in range(d):
            U_test[:, i] = self.std.get_cdf(X_test[:, i])

        Z_test = np.zeros(U_test.shape)
        for i in range(d):
            Z_test[:, i] = self.D[i].get_icdf(U_test[:, i])
        return Z_test

    def get_pdf(self, X):
        """
        Evaluate PDF at the sample points.
        :param numpy.ndarray X: Sample points (Number of points by dimensions)
        :return:
            **C**: A numpy.ndarray of shape (N,) with evaluations of the PDF.
        """

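        # Gaussian-copula factorisation: with u_i = Phi^{-1}(F_i(x_i)),
        #   f(x) = [ phi_R0(u) / prod_i phi(u_i) ] * prod_i f_i(x_i),
        # where phi_R0 is the zero-mean multivariate normal PDF with
        # correlation matrix R0 and phi is the standard normal PDF.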
        parameters = self.D
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        d = X.shape[1]

        U = np.zeros(X.shape)
        for i in range(d):
            U[:, i] = norm.ppf(parameters[i].get_cdf(X[:, i]))
        cop_num = multivariate_normal(mean=np.zeros(d), cov=self.R0).pdf(U)
        cop_den = np.prod(np.array([norm.pdf(U[:, i]) for i in range(d)]),
                          axis=0)
        marginal_prod = np.prod(np.array(
            [parameters[i].get_pdf(X[:, i]) for i in range(d)]),
                                axis=0)
        return cop_num / cop_den * marginal_prod
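
# A minimal end-to-end sketch of the Correlations class above. This is
# illustrative rather than from the original source: it assumes the
# module-level imports the code above implies (numpy as np, Parameter,
# and scipy.stats' norm and multivariate_normal).
if __name__ == '__main__':
    p1 = Parameter(distribution='uniform', lower=-1.0, upper=1.0, order=3)
    p2 = Parameter(distribution='uniform', lower=0.0, upper=2.0, order=3)
    R = np.array([[1.0, 0.7],
                  [0.7, 1.0]])

    corr = Correlations(R, parameters=[p1, p2])
    samples = corr.get_correlated_samples(N=500)   # shape (500, 2)
    print(np.corrcoef(samples.T))                  # should approximate R
    print(corr.get_pdf(samples)[:5])               # joint PDF at first samples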