def eval_rosenblattdd(sg_pdf, xs):
    """Apply the sparse-grid Rosenblatt transformation to a batch of points
    and return the transformed matrix.

    Parameters
    ----------
    sg_pdf : object
        Sparse-grid density with attributes ``grid`` (pysgpp grid object),
        ``d`` (dimensionality) and ``alpha`` (surplus coefficient vector).
    xs : sequence
        Only ``len(xs)`` is used here; it determines the number of rows
        (evaluation points) of the input matrix.

    Returns
    -------
    pysgpp.DataMatrix
        The transformed points, shape ``(len(xs), sg_pdf.d)``.
    """
    # Fix: removed the unused `X, Y = np.meshgrid(xs, xs)` (dead code) and
    # added a return value so callers can use the result; the debug print of
    # the original is preserved.
    op = pysgpp.createOperationRosenblattTransformation(sg_pdf.grid)
    input_points = pysgpp.DataMatrix(len(xs), sg_pdf.d)
    output_points = pysgpp.DataMatrix(len(xs), sg_pdf.d)
    # NOTE(review): every input coordinate is hard-coded to 0.5; the values
    # in `xs` are never used beyond its length -- confirm this is intended.
    for i in range(len(xs)):
        for j in range(sg_pdf.d):
            input_points.set(i, j, 0.5)
    op.doTransformation(sg_pdf.alpha, input_points, output_points)
    print(output_points)
    return output_points
def cdf(self, x):
    """Evaluate the CDF of this sparse-grid density at ``x`` via the
    Rosenblatt transformation.

    ``x`` may be a scalar, a list, a ``DataVector`` or a ``DataMatrix``.
    Scalars and lists are first converted to a ``DataVector``.

    Returns a float for a single 1D evaluation, otherwise a numpy array.
    """
    # convert the parameter to the right format
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])

    # do the transformation
    if self.grid.getStorage().dim() == 1:
        # 1D grid: transform each entry of x individually
        op = createOperationRosenblattTransformation1D(self.grid)
        ans = np.ndarray(len(x))
        for i, xi in enumerate(x.array()):
            ans[i] = op.doTransformation1D(self.alpha, xi)
        # unwrap single-entry results to a plain float
        if len(ans) == 1:
            return ans[0]
        else:
            return ans
    else:
        # multi-dimensional grid: assemble input matrix A and zeroed
        # output matrix B for the batched transformation
        # NOTE(review): if x is neither a DataMatrix nor a DataVector at
        # this point, A/B are unbound and this raises NameError -- confirm
        # callers only pass the supported types.
        if isinstance(x, DataMatrix):
            A = x
            B = DataMatrix(A.getNrows(), A.getNcols())
            B.setAll(0.0)
        elif isinstance(x, DataVector):
            # single point: wrap it as a 1-row matrix
            A = DataMatrix(1, len(x))
            A.setRow(0, x)
            B = DataMatrix(1, len(x))
            B.setAll(0)

        # do the transformation
        op = createOperationRosenblattTransformation(self.grid)
        op.doTransformation(self.alpha, A, B)

        # extract the outcome
        # NOTE(review): for a DataVector input only B.get(0, 0), i.e. the
        # first component of the transformed point, is returned -- confirm
        # this is the intended contract.
        if isNumerical(x) or isinstance(x, DataVector):
            return B.get(0, 0)
        elif isinstance(x, DataMatrix):
            return B.array()
def cdf(self, x, shuffle=True):
    """Evaluate the CDF of this estimator at the given point(s) using the
    Rosenblatt transformation.

    The input is first normalized to matrix form and, if a probabilistic
    transformation is configured, mapped onto the unit hypercube.

    Returns a float for a single 1D evaluation, otherwise a numpy array.
    """
    # bring the evaluation point(s) into matrix form
    points = self._convertEvalPoint(x)

    # map the samples onto the unit hypercube when a transformation is set
    if self.trans is None:
        unit_points = points
    else:
        unit_points = self.trans.probabilisticToUnitMatrix(points)

    if self.dim == 1:
        # one-dimensional grid: transform each sample separately
        op1d = createOperationRosenblattTransformation1D(self.grid)
        values = np.ndarray(points.shape[0])
        for k, uk in enumerate(unit_points[:, 0]):
            values[k] = op1d.doTransformation1D(self.unnormalized_alpha_vec, uk)
        return values[0] if len(values) == 1 else values

    # multi-dimensional grid: transform all samples in one batched call
    in_mat = DataMatrix(unit_points)
    out_mat = DataMatrix(unit_points.shape[0], unit_points.shape[1])
    out_mat.setAll(0.0)

    op = createOperationRosenblattTransformation(self.grid)
    if shuffle:
        op.doTransformation(self.alpha_vec, in_mat, out_mat)
    else:
        # fixed ordering: start the transformation at dimension 0
        op.doTransformation(self.alpha_vec, in_mat, out_mat, 0)

    # single scalar evaluation -> unwrap to a plain float
    if unit_points.shape == (1, 1):
        return out_mat.get(0, 0)
    return out_mat.array()
def main():
    """Train an SGDE learner on a Friedman1 dataset, compare it against a
    kernel density estimate, and demonstrate the (inverse) Rosenblatt
    transformation on random points.

    Fixes: reconstructed the split ``"start training... "`` string literal,
    corrected comment typos ("transformatio" -> "transformation",
    "Rosenform" -> "Rosenblatt") and removed a commented-out duplicate line.
    """
    # Generate data
    print("generate dataset... ", end=' ')
    data_tr, _ = generate_friedman1(123456)
    print("Done")
    print("generated a friedman1 dataset (10D) with 2000 samples")

    # Config grid
    print("create grid config... ", end=' ')
    grid = sg.RegularGridConfiguration()
    grid.dim_ = 10
    grid.level_ = 3
    grid.type_ = sg.GridType_Linear
    print("Done")

    # Config adaptivity (refinement disabled: numRefinements_ = 0)
    print("create adaptive refinement config... ", end=' ')
    adapt = sg.AdaptivityConfiguration()
    adapt.numRefinements_ = 0
    adapt.noPoints_ = 10
    print("Done")

    # Config solver
    print("create solver config... ", end=' ')
    solv = sg.SLESolverConfiguration()
    solv.maxIterations_ = 1000
    solv.eps_ = 1e-14
    solv.threshold_ = 1e-14
    solv.type_ = sg.SLESolverType_CG
    print("Done")

    # Config regularization
    print("create regularization config... ", end=' ')
    regular = sg.RegularizationConfiguration()
    regular.regType_ = sg.RegularizationType_Laplace
    print("Done")

    # Config cross validation for learner (disabled; fixed lambda is used)
    print("create learner config... ", end=' ')
    crossValid = sg.CrossvalidationConfiguration()
    crossValid.enable_ = False
    crossValid.kfold_ = 3
    crossValid.lambda_ = 3.16228e-06
    crossValid.lambdaStart_ = 1e-1
    crossValid.lambdaEnd_ = 1e-10
    crossValid.lambdaSteps_ = 3
    crossValid.logScale_ = True
    crossValid.shuffle_ = True
    crossValid.seed_ = 1234567
    crossValid.silent_ = False
    print("Done")

    #
    # Create the learner with the given configuration
    #
    print("create the learner... ")
    learner = sg.LearnerSGDE(grid, adapt, solv, regular, crossValid)
    learner.initialize(data_tr)

    # Train the learner
    print("start training... ")
    learner.train()
    print("done training")

    #
    # Estimate the probability density function (pdf) via a Gaussian kernel
    # density estimation (KDE) and print the corresponding values
    #
    kde = sg.KernelDensityEstimator(data_tr)
    x = sg.DataVector(learner.getDim())
    x.setAll(0.5)

    print("-----------------------------------------------")
    print(learner.getSurpluses().getSize(), " -> ", learner.getSurpluses().sum())
    print("pdf_SGDE(x) = ", learner.pdf(x), " ~ ", kde.pdf(x), " = pdf_KDE(x)")
    print("mean_SGDE = ", learner.mean(), " ~ ", kde.mean(), " = mean_KDE")
    print("var_SGDE = ", learner.variance(), " ~ ", kde.variance(), " = var_KDE")

    # Print the covariances
    C = sg.DataMatrix(grid.dim_, grid.dim_)
    print("----------------------- Cov_SGDE -----------------------")
    learner.cov(C)
    print(C)
    print("----------------------- Cov_KDE -----------------------")
    kde.cov(C)
    print(C)

    #
    # Apply the inverse Rosenblatt transformation to a matrix of random
    # points. To do this, first generate the random points uniformly, then
    # initialize an inverse Rosenblatt transformation operation and apply it
    # to the points. Finally print the calculated values
    #
    print("-----------------------------------------------")
    opInvRos = sg.createOperationInverseRosenblattTransformation(learner.getGrid())
    points = sg.DataMatrix(randu_mat(12, grid.dim_))
    print(points)
    pointsCdf = sg.DataMatrix(points.getNrows(), points.getNcols())
    opInvRos.doTransformation(learner.getSurpluses(), points, pointsCdf)

    #
    # To check whether the results are correct perform a Rosenblatt
    # transformation on the data that has been created by the inverse
    # Rosenblatt transformation above and print the calculated values
    #
    points.setAll(0.0)
    opRos = sg.createOperationRosenblattTransformation(learner.getGrid())
    opRos.doTransformation(learner.getSurpluses(), pointsCdf, points)
    print("-----------------------------------------------")
    print(pointsCdf)
    print("-----------------------------------------------")
    print(points)