Example #1
    def solve(self, results, gradient_results=None, solver=None, settings=None, matrix=None, verbose=False):
        """
        Determines gPC coefficients

        Parameters
        ----------
        results : ndarray of float [n_grid x n_out]
            Results from the simulations with n_out output quantities
        gradient_results : ndarray of float [n_gradient x n_out x dim], optional, default: None
            Gradient of results in original parameter space in specific grid points
        solver : str
            Solver to determine the gPC coefficients
            - 'Moore-Penrose' ... Pseudoinverse of gPC matrix (SGPC.Reg, EGPC)
            - 'OMP' ... Orthogonal Matching Pursuit, sparse recovery approach (SGPC.Reg, EGPC)
            - 'LarsLasso' ... Least-Angle Regression using Lasso model (SGPC.Reg, EGPC)
            - 'NumInt' ... Numerical integration, spectral projection (SGPC.Quad)
        settings : dict
            Solver settings
            - 'Moore-Penrose' ... None
            - 'OMP' ... {"n_coeffs_sparse": int} Number of gPC coefficients != 0 or "sparsity": float 0...1
            - 'LarsLasso' ... {"alpha": float 0...1} Regularization parameter
            - 'NumInt' ... None
        matrix : ndarray of float, optional, default: self.gpc_matrix or [self.gpc_matrix, self.gpc_matrix_gradient]
            Matrix to invert. Depending on the gradient_enhanced option, this is either the standard gPC matrix
            or the gPC matrix stacked with its gradient.
        verbose : bool
            If True, print progress messages to standard output

        Returns
        -------
        coeffs: ndarray of float [n_coeffs x n_out]
            gPC coefficients
        """

        ge_str = ""

        if matrix is None:
            matrix = self.gpc_matrix

            if self.gradient is False:
                matrix = self.gpc_matrix
                ge_str = ""
            else:
                if solver != 'NumInt':
                    if self.gpc_matrix_gradient is not None:
                        matrix = np.vstack((self.gpc_matrix, self.gpc_matrix_gradient))
                    else:
                        matrix = self.gpc_matrix
                    ge_str = "(gradient enhanced)"
                else:
                    # emit an actual warning (requires a module-level "import warnings")
                    warnings.warn("Gradient enhanced version is not applicable in case of "
                                  "numerical integration (quadrature).")

        # use default solver if not specified
        if solver is None:
            solver = self.solver

        # use default solver settings if not specified
        if settings is None:
            settings = self.settings

        iprint("Determine gPC coefficients using '{}' solver {}...".format(solver, ge_str),
               tab=0, verbose=verbose)

        # construct results array
        if solver != 'NumInt' and gradient_results is not None:
            # transform gradient of results according to projection
            if self.p_matrix is not None:
                gradient_results = np.matmul(gradient_results,
                                             self.p_matrix.transpose() * self.p_matrix_norm[np.newaxis, :])

            results_complete = np.vstack((results, ten2mat(gradient_results)))
        else:
            results_complete = results

        #################
        # Moore-Penrose #
        #################
        if solver == 'Moore-Penrose':
            # determine pseudoinverse of gPC matrix
            self.matrix_inv = np.linalg.pinv(matrix)

            try:
                coeffs = np.matmul(self.matrix_inv, results_complete)
            except ValueError:
                raise AttributeError("Please check format of parameter sim_results: [n_grid (* dim) x n_out] "
                                     "np.ndarray.")

        ###############################
        # Orthogonal Matching Pursuit #
        ###############################
        elif solver == 'OMP':
            # transform gPC matrix to fastmat format
            matrix_fm = fm.Matrix(matrix)

            if results_complete.ndim == 1:
                results_complete = results_complete[:, np.newaxis]

            # determine gPC-coefficients of extended basis using OMP
            if "n_coeffs_sparse" in settings.keys():
                n_coeffs_sparse = int(settings["n_coeffs_sparse"])
            elif "sparsity" in settings.keys():
                n_coeffs_sparse = int(np.ceil(matrix.shape[1]*settings["sparsity"]))
            else:
                raise AttributeError("Please specify 'n_coeffs_sparse' or 'sparsity' in solver settings dictionary!")

            coeffs = fm.algs.OMP(matrix_fm, results_complete, n_coeffs_sparse)

        ################################
        # Least-Angle Regression Lasso #
        ################################
        elif solver == 'LarsLasso':

            if results_complete.ndim == 1:
                results_complete = results_complete[:, np.newaxis]

            # determine gPC-coefficients of extended basis using LarsLasso
            reg = linear_model.LassoLars(alpha=settings["alpha"], fit_intercept=False)
            reg.fit(matrix, results_complete)
            coeffs = reg.coef_

            if coeffs.ndim == 1:
                coeffs = coeffs[:, np.newaxis]
            else:
                coeffs = coeffs.transpose()

        # TODO: @Lucas: Please add GPU support
        #########################
        # Numerical Integration #
        #########################
        elif solver == 'NumInt':
            # check if quadrature rule (grid) fits to the probability density distribution (pdf)
            grid_pdf_fit = True
            for i_p, p in enumerate(self.problem.parameters_random):
                if self.problem.parameters_random[p].pdf_type == 'beta':
                    if not (self.grid.grid_type[i_p] == 'jacobi'):
                        grid_pdf_fit = False
                        break
                elif self.problem.parameters_random[p].pdf_type in ['norm', 'normal']:
                    if not (self.grid.grid_type[i_p] == 'hermite'):
                        grid_pdf_fit = False
                        break

            # if not, calculate joint pdf
            if not grid_pdf_fit:
                joint_pdf = np.ones(self.grid.coords_norm.shape)

                for i_p, p in enumerate(self.problem.parameters_random):
                    joint_pdf[:, i_p] = \
                        self.problem.parameters_random[p].pdf_norm(x=self.grid.coords_norm[:, i_p])

                joint_pdf = np.array([np.prod(joint_pdf, axis=1)]).transpose()

                # weight sim_results with the joint pdf
                results_complete = results_complete * joint_pdf * 2 ** self.problem.dim
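
            # spectral projection: coeffs = Psi^T * diag(w) * y, i.e. the
            # quadrature approximation of projecting the results onto each
            # basis polynomial using the grid weights w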

            # scale rows of gpc matrix with quadrature weights
            matrix_weighted = np.matmul(np.diag(self.grid.weights), matrix)

            # determine gpc coefficients [n_coeffs x n_output]
            coeffs = np.matmul(results_complete.transpose(), matrix_weighted).transpose()

        else:
            raise AttributeError("Unknown solver: '{}'!")

        return coeffs
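
# A hypothetical usage sketch (not from the source): calling solve() on an
# existing gPC object `gpc` with a results array `res`; the object, the array
# and the settings values below are placeholder assumptions, not library defaults.
# coeffs = gpc.solve(results=res, solver='Moore-Penrose', settings=None)
# coeffs = gpc.solve(results=res, solver='OMP', settings={"sparsity": 0.25})
# coeffs = gpc.solve(results=res, solver='LarsLasso', settings={"alpha": 1e-3})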
Example #2
print("------------------------------------------------------------")

# define some constants
N = 100  # number of measurements (height of dictionary)
M = 500  # width of dictionary
K = 30  # sparsity (non-zero components in support)

# define baseline random support (1.0 forces dtype to be float)
s = scipy.sparse.rand(M, 1, 1.0 * K / M).todense()
x0 = s - 1j * s

# define the dictionary
# matG is a dense complex Gaussian matrix; option 1 below combines it with an
# M x M Fourier matrix, option 2 uses it on its own
matG = fastmat.Matrix(
        numpy.random.normal(0.0, numpy.sqrt(N), (N, M)) +
        1j * numpy.random.normal(0.0, numpy.sqrt(N), (N, M))
    )

# option 1
mat = fastmat.Product(matG, fastmat.Fourier(M))

# option 2
#mat = matG

# generate measurements from baseline support
b = mat * x0
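
# the timing calls below use a `printTime` helper defined elsewhere in the
# demo script; a minimal stand-in (an assumption, not the original helper)
# could look like this:
import time

def printTime(strCaption, funcCall, *args):
    # run funcCall(*args), report the elapsed wall-clock time, return the result
    t0 = time.time()
    result = funcCall(*args)
    print("%s: %.3fs" % (strCaption, time.time() - t0))
    return result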

# run OMP and ISTA
# result = fastmat.OMP(mat, b, K)
xOMP = printTime("running OMP", fastmat.algs.OMP, mat, b, K)
# result = fastmat.ISTA(mat, b, numLambda=1e4)
Example #3
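# Hypothetical, self-contained setup for what this excerpt assumes (the
# original script defines these elsewhere): the imports, a first-order
# difference kernel `c` as the defining column of the circulant operator,
# and a random stand-in image `arrHead`.
import time

import numpy as np
import fastmat

numN = 256
c = np.zeros(numN)
c[0], c[1] = 1.0, -1.0                  # first-order difference (edge) kernel
arrHead = np.random.randn(numN, numN)   # stand-in for the test image
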
# create the correlation matrix
print(" * Create the correlation matrix")
fmatLx1 = fastmat.Circulant(c)

# calculate the gradient in the directions
# using the efficient way
print(" * Perform edge detection while exploiting structure")
s = time.time()
arrEdgesX = np.abs(fmatLx1 * arrHead)
arrEdgesY = np.abs(fmatLx1 * arrHead.T).T
numFastTime = time.time() - s

# cast the matrix to a standard numpy array
print(" * Generate unstructured reference matrix for speed comparison")
fmatLx2 = fastmat.Matrix(fmatLx1.array)

# calculate the gradient in the directions
# using the inefficient way
print(" * Perform edge detection without exploting structure")
s = time.time()
arrEdgesX = np.abs(fmatLx2 * arrHead)
arrEdgesY = np.abs(fmatLx2 * arrHead.T).T
numSlowTime = time.time() - s

# calc the total gradient
arrEdges = np.sqrt(arrEdgesX**2 + arrEdgesY**2)

print("\nResults:")
print("   Fast Detection: %.3fs" % (numFastTime))
print("   Reference     : %.3fs" % (numSlowTime))
Example #4
arrP = genPulse(numSignalSize, numPulseWidth, numPulseFreq)
timePulse = time.time() - s


# generate a circulant dictionary which will serve as linear operator for the
# convolution
print(" * Create the dictionary")
s = time.time()
matC = fastmat.Circulant(arrP)
timeDictionary = time.time() - s


# create an explicit version of the dictionary
# that disregards the circulant structure
print(" * Create the unstructured matrix for speed comparison")
matCHat = fastmat.Matrix(matC.array)


# generate a random sequence of spikes where position and amplitudes are random
print(" * Generating the ground truth as a sequence of spikes")
s = time.time()
arrX = genGroundTruth(numSignalSize, numSlices, numPulses)
timeGroundTruth = time.time() - s


# do the measurement (apply the forward model), i.e. do the convolution
print(" * Apply the forward model to generate the signal")
s = time.time()
arrY = matC * arrX
timeForward = time.time() - s
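
# Side note (a minimal self-contained sketch, not part of the original script):
# a circulant multiply is a circular convolution, which fastmat can evaluate
# via the FFT; this is what makes the structured matC faster than matCHat.
import numpy as np
import fastmat

nDemo = 16
kernelDemo = np.random.randn(nDemo)
xDemo = np.random.randn(nDemo)
yStruct = fastmat.Circulant(kernelDemo) * xDemo
yFFT = np.fft.ifft(np.fft.fft(kernelDemo) * np.fft.fft(xDemo)).real
print(np.allclose(yStruct, yFFT))   # should print True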
Example #5
# draw a dense random and normalized array
print(" * Generate the Matrix")
arrFull = npr.randn(numMatSize, numMatSize) / np.sqrt(numMatSize)

# get the SVD
print(" * Calc the SVD")
arrU, vecSigma, arrV = npl.svd(arrFull)

print(" * Truncate the singular values")
vecT = np.linspace(0, 3, numMatSize - numSize - 1)
vecSigma[numSize + 1:] = vecSigma[numSize + 1:] * 0.1 * np.exp(-vecT)

print(" * Rebuild the matrix with truncated singular values")
arrFull = arrU.dot(np.diag(vecSigma).dot(arrV.T))

matFull = fastmat.Matrix(arrFull)
matApprox = fastmat.LowRank(vecSigma[:numSize], arrU[:, :numSize],
                            arrV[:, :numSize])

print(" * Generate the linear system")
vecX = npr.randn(numMatSize)
vecB = matFull * vecX

s = time.time()
y1 = matFull * vecX
timeDenseFwd = time.time() - s

s = time.time()
y2 = matApprox * vecX
timeApproxFwd = time.time() - s
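
# Illustrative summary output (a sketch added here; the original script's
# reporting is not part of this excerpt):
print("\nResults:")
print("   Dense forward   : %.3fs" % (timeDenseFwd))
print("   Low-rank forward: %.3fs" % (timeApproxFwd))
print("   Relative error  : %.2e" % (npl.norm(y1 - y2) / npl.norm(y1)))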