def computeDetectionSize(self, probabilityLevel, confidenceLevel=None):
    """
    Compute the detection size for a given probability level.

    Parameters
    ----------
    probabilityLevel : float
        The probability level for which the defect size is computed.
    confidenceLevel : float
        The confidence level associated to the given probability level
        the defect size is computed. Default is None.

    Returns
    -------
    result : :py:class:`openturns.PointWithDescription`
        The detection size computed at the given probability level, and
        at the given confidence level if provided.

    Raises
    ------
    Exception
        If the POD model does not reach the wanted probability level on
        the defect interval, so the root solver cannot bracket a solution.
    """
    defectMin = self._defects.getMin()[0]
    defectMax = self._defects.getMax()[0]

    # compute 'a90': solve POD(a) = probabilityLevel on the defect interval.
    # The model is built with the complementary level — presumably because
    # the underlying quantile is taken on the survival side; TODO confirm.
    model = self._buildModel(1. - probabilityLevel)
    try:
        detectionSize = ot.PointWithDescription(
            1, ot.Brent().solve(model, self._detectionBoxCox,
                                defectMin, defectMax))
    except Exception as exc:
        # Was a bare 'except:' which also caught KeyboardInterrupt/SystemExit
        # and hid the solver's root cause; catch Exception and chain instead.
        raise Exception('The POD model does not contain, for the given ' +
                        'defect interval, the wanted probability level.') from exc
    description = ['a' + str(int(probabilityLevel * 100))]

    # compute 'a90_95': same solve on the confidence-level POD model
    if confidenceLevel is not None:
        modelCl = self.getPODCLModel(confidenceLevel)
        # explicit bracketing check before calling Brent
        if not (modelCl([defectMin])[0] <= probabilityLevel
                <= modelCl([defectMax])[0]):
            raise Exception('The POD model at the confidence level does not ' +
                            'contain, for the given defect interval, the ' +
                            'wanted probability level.')
        detectionSize.add(ot.Brent().solve(modelCl, probabilityLevel,
                                           defectMin, defectMax))
        description.append('a' + str(int(probabilityLevel * 100)) + '/' +
                           str(int(confidenceLevel * 100)))

    # add description to the Point
    detectionSize.setDescription(description)
    return detectionSize
def getSensitivityAnalysisResultsMetamodel(self, metamodel, doe_sobol, size,
                                           N_LHS, N_KL_Young, N_KL_Diam, R2):
    """
    Compute Sobol first order indices of a kriging metamodel response.

    Evaluates the metamodel on the Sobol design, runs the Martinez
    first-order sensitivity estimator and packs the indices with their
    confidence bounds into a described point.

    Parameters
    ----------
    metamodel : object exposing ``__kriging_metamodel__``
        The surrogate to evaluate on the design.
    doe_sobol : :py:class:`openturns.Sample`
        The Sobol experiment design.
    size : int
        Base sample size of the Sobol design.
    N_LHS, N_KL_Young, N_KL_Diam, R2 : float
        Metadata stored alongside the indices in the result point.

    Returns
    -------
    result_point : :py:class:`openturns.PointWithDescription`
        Metadata plus first-order indices (SI_*) and their lower/upper
        confidence bounds for the four outputs E, D, FP, FN.
    """
    dim = doe_sobol.getDimension()
    try:
        meta_response = sequentialFunctionEvaulation(
            metamodel.__kriging_metamodel__, doe_sobol)
    except Exception as e:
        # Deliberate best-effort fallback: keep the study loop alive with a
        # zero response instead of aborting.
        # (fixed typo in the log message: 'Caugt' -> 'Caught')
        print('Caught exception:\n', e)
        print('filling response with zeros...')
        meta_response = ot.Sample(np.zeros((doe_sobol.getSize(), 1)))
    sensitivity_analysis = klfs.SobolKarhunenLoeveFieldSensitivityAlgorithm()
    sensitivity_analysis.setDesign(doe_sobol, meta_response, size)
    sensitivity_analysis.setEstimator(ot.MartinezSensitivityAlgorithm())
    FO_indices = sensitivity_analysis.getFirstOrderIndices()[0]
    conf_level = sensitivity_analysis.getFirstOrderIndicesInterval()[0]
    lb = conf_level.getLowerBound()
    ub = conf_level.getUpperBound()
    result_point = ot.PointWithDescription(
        [('meta', 1.), ('N_LHS', N_LHS), ('size', size), ('kl_dimension', dim),
         ('N_KL_Young', N_KL_Young), ('N_KL_Diam', N_KL_Diam), ('R2', R2),
         ('SI_E', FO_indices[0][0]), ('SI_E_lb', lb[0]), ('SI_E_ub', ub[0]),
         ('SI_D', FO_indices[1][0]), ('SI_D_lb', lb[1]), ('SI_D_ub', ub[1]),
         ('SI_FP', FO_indices[2][0]), ('SI_FP_lb', lb[2]), ('SI_FP_ub', ub[2]),
         ('SI_FN', FO_indices[3][0]), ('SI_FN_lb', lb[3]), ('SI_FN_ub', ub[3])])
    print('------------- RESULTS ------------')
    print('------------ META MODEL ----------')
    print('-----------SIZE LHS : {} ---------'.format(int(N_LHS)))
    print(result_point)
    return result_point
def _getResultValue(self, test, description):
    """
    Generalized accessor method for the R2 or p-values.

    Parameters
    ----------
    test : string
        name of the keys for the dictionary.
    description : string
        name the test to be displayed.
    """
    # Always report the uncensored result; append the censored one if any.
    entries = [(description + ' for uncensored case',
                self._resultsUnc.testResults[test])]
    if self._censored:
        entries.append((description + ' for censored case',
                        self._resultsCens.testResults[test]))
    return ot.PointWithDescription(entries)
def getStandardError(self):
    """
    Accessor to the standard error of the estimate.

    Returns
    -------
    stderr : :py:class:`openturns.Point`
        The standard error of the estimate for the uncensored and
        censored (if so) linear regression model.
    """
    # Uncensored value first, censored appended only when available.
    entries = [('Stderr for uncensored case', self._resultsUnc.stderr)]
    if self._censored:
        entries.append(('Stderr for censored case', self._resultsCens.stderr))
    return ot.PointWithDescription(entries)
def getSlope(self):
    """
    Accessor to the slope of the linear regression model.

    Returns
    -------
    slope : :py:class:`openturns.Point`
        The slope parameter for the uncensored and censored (if so)
        linear regression model.
    """
    # Uncensored value first, censored appended only when available.
    entries = [('Slope for uncensored case', self._resultsUnc.slope)]
    if self._censored:
        entries.append(('Slope for censored case', self._resultsCens.slope))
    return ot.PointWithDescription(entries)
def getIntercept(self):
    """
    Accessor to the intercept of the linear regression model.

    Returns
    -------
    intercept : :py:class:`openturns.Point`
        The intercept parameter for the uncensored and censored (if so)
        linear regression model.
    """
    # Uncensored value first, censored appended only when available.
    entries = [('Intercept for uncensored case', self._resultsUnc.intercept)]
    if self._censored:
        entries.append(('Intercept for censored case',
                        self._resultsCens.intercept))
    return ot.PointWithDescription(entries)
def _computeDetectionSize(self, model, modelCl=None, probabilityLevel=None,
                          confidenceLevel=None, defectMin=None,
                          defectMax=None):
    """
    Compute the detection size for a given probability level.

    Parameters
    ----------
    probabilityLevel : float
        The probability level for which the defect size is computed.
    confidenceLevel : float
        The confidence level associated to the given probability level
        the defect size is computed. Default is None.

    Returns
    -------
    result : collection of :py:class:`openturns.PointWithDescription`
        A PointWithDescription containing the detection size computed at
        the given probability level and confidence level if provided.
    """
    # Default search interval: full range of the input defect sample.
    if defectMin is None:
        defectMin = self._inputSample.getMin()[0]
    if defectMax is None:
        defectMax = self._inputSample.getMax()[0]

    # compute 'a90' — make sure the level is bracketed before solving
    podAtMin = model([defectMin])[0]
    podAtMax = model([defectMax])[0]
    if not (podAtMin <= probabilityLevel <= podAtMax):
        raise Exception('The POD model does not contain, for the given ' +
                        'defect interval, the wanted probability level.')
    root = ot.Brent().solve(model, probabilityLevel, defectMin, defectMax)
    detectionSize = ot.PointWithDescription(1, root)
    description = ['a' + str(int(probabilityLevel * 100))]

    # compute 'a90_95' on the confidence-level model when requested
    if confidenceLevel is not None:
        podClMin = modelCl([defectMin])[0]
        podClMax = modelCl([defectMax])[0]
        if not (podClMin <= probabilityLevel <= podClMax):
            raise Exception('The POD model at the confidence level does not ' +
                            'contain, for the given defect interval, the ' +
                            'wanted probability level.')
        rootCl = ot.Brent().solve(modelCl, probabilityLevel,
                                  defectMin, defectMax)
        detectionSize.add(rootCl)
        description.append('a' + str(int(probabilityLevel * 100)) + '/' +
                           str(int(confidenceLevel * 100)))

    # add description to the Point
    detectionSize.setDescription(description)
    return detectionSize
def getAggregatedTotalOrderIndices(self):
    '''Returns the aggregated total order indices

    Returns
    -------
    aggTO_indices : list of ot.Point
    '''
    self.__fastResultCheck__()
    aggTO_indices = list()
    for i in range(self.__nOutputs__):
        # Wrap the raw indices so a name and a description can be attached.
        point = ot.PointWithDescription(
            self.__results__[i].getAggregatedTotalOrderIndices())
        outputName = self.outputDesign[i].getName()
        point.setName('Sobol_' + outputName)
        point.setDescription([outputName + '_' + self.inputDescription[j]
                              for j in range(self.__nSobolIndices__)])
        aggTO_indices.append(point)
    return aggTO_indices
def getSensitivityAnalysisResults(self, sample_in, sample_out, size,
                                  N_KL_Young, N_KL_Diam):
    """
    Compute Sobol first order indices on the real model outputs.

    Runs the Martinez first-order estimator on the given design and packs
    the indices and their confidence bounds into a described point
    (metadata fields N_LHS and R2 are set to -1 as placeholders).
    """
    # Dimension of the KL input vector
    dim = sample_in.getDimension()
    sensitivity_analysis = klfs.SobolKarhunenLoeveFieldSensitivityAlgorithm()
    sensitivity_analysis.setDesign(sample_in, sample_out, size)
    sensitivity_analysis.setEstimator(ot.MartinezSensitivityAlgorithm())
    FO_indices = sensitivity_analysis.getFirstOrderIndices()[0]
    conf_level = sensitivity_analysis.getFirstOrderIndicesInterval()[0]
    lb = conf_level.getLowerBound()
    ub = conf_level.getUpperBound()
    # Metadata first, then per-output index / lower bound / upper bound.
    entries = [('meta', 0.), ('N_LHS', -1.), ('size', size),
               ('kl_dimension', dim), ('N_KL_Young', N_KL_Young),
               ('N_KL_Diam', N_KL_Diam), ('R2', -1)]
    for k, label in enumerate(['SI_E', 'SI_D', 'SI_FP', 'SI_FN']):
        entries.append((label, round(FO_indices[k][0], 5)))
        entries.append((label + '_lb', round(lb[k], 5)))
        entries.append((label + '_ub', round(ub[k], 5)))
    result_point = ot.PointWithDescription(entries)
    print('------------- RESULTS ------------')
    print('------------ REAL MODEL ----------')
    print('----------- SIZE DOE = {} -----------'.format(str(int(size))))
    print(result_point)
    return result_point
# Fill the sample s1 with named 2-D points (p2 and s1 are created earlier,
# outside this chunk).
p2[1] = 200.
s1[0] = p2
p3 = ot.Point(2, 0.)
p3.setName('Two')
p3[0] = 101.
p3[1] = 201.
s1[1] = p3
p4 = ot.Point(2, 0.)
p4.setName('Three')
p4[0] = 102.
p4[1] = 202.
s1[2] = p4
myStudy.add('mySample', s1)
# Add a point with a description
# NOTE(review): indices 0..2 are set, so p1 (defined before this chunk) is
# presumably 3-dimensional — confirm against its construction.
pDesc = ot.PointWithDescription(p1)
desc = pDesc.getDescription()
desc[0] = 'x'
desc[1] = 'y'
desc[2] = 'z'
pDesc.setDescription(desc)
myStudy.add(pDesc)
# Add a matrix
# 2x3 matrix filled row by row with 0..5.
matrix = ot.Matrix(2, 3)
matrix[0, 0] = 0
matrix[0, 1] = 1
matrix[0, 2] = 2
matrix[1, 0] = 3
matrix[1, 1] = 4
matrix[1, 2] = 5
def project(self, args):
    '''Project a function or a field on the eigenmodes basis.

    As the eigenmode basis is constructed over the decomposition of
    centered processes and iso-probabilistic transformations of centered
    scalar distributions, objects that are to be projected have to be
    centered first!
    '''
    args = atLeastList(args)
    nArgs = len(args)
    nProcess = self.__field_distribution_count__
    isAggreg = self.__isAggregated__
    homogenMesh = self.__unified_mesh__
    homogenDim = self.__unified_dimension__
    # Only these OpenTURNS container types are supported as first argument.
    assert isinstance(args[0], (ot.Field, ot.Sample, ot.ProcessSample,
                                ot.AggregatedFunction,
                                ot.SampleImplementation))
    # We then subtract the mean of each process to any entry, so we are
    # again in the centered case (in-place mutation of the inputs).
    for i in range(nArgs):
        addConstant2Iterable(args[i], -1 * self.__means__[i])
    if isAggreg:
        # Aggregated case: a single KL result covers all components.
        print('projection of aggregated process')
        assert nProcess == 1, 'do not work with lists of aggregated processes'
        assert homogenMesh, 'if aggregated then the mesh is shared'
        assert homogenDim, 'if aggregated then the dimension is shared'
        inDim = self.__KLResultsAndDistributions__[0].getCovarianceModel().getInputDimension()
        outDim = self.__KLResultsAndDistributions__[0].getCovarianceModel().getOutputDimension()
        if isinstance(args[0], (ot.Field, ot.ProcessSample, ot.AggregatedFunction)):
            # Input/output dimensions are exposed under different names
            # depending on the concrete type — probe both spellings.
            try:
                fdi = args[0].getInputDimension()
            except:
                fdi = args[0].getMesh().getDimension()
            try:
                fdo = args[0].getOutputDimension()
            except:
                fdo = args[0].getDimension()
            if fdi == inDim and fdo == outDim:
                if nArgs > 1 and not isinstance(args[0], ot.ProcessSample):
                    # Several fields: one projection row per argument.
                    sample = ot.Sample([self.__KL_projecting__[0](args[i])
                                        for i in range(nArgs)])
                    sample.setDescription(self.__mode_description__)
                    return sample
                elif isinstance(args[0], ot.Field):
                    # Single field: return a described point of coefficients.
                    projection = self.__KL_projecting__[0](args[0])
                    projDescription = list(zip(self.__mode_description__, projection))
                    projection = ot.PointWithDescription(projDescription)
                    return projection
                elif isinstance(args[0], ot.ProcessSample):
                    # Whole process sample: project in one call.
                    projection = self.__KL_projecting__[0](args[0])
                    projection.setDescription(self.__mode_description__)
                    return projection
            else:
                raise Exception('InvalidDimensionException')
    else:
        if isinstance(args[0], (ot.Field, ot.Sample)):
            print('projection of a list of {} '.format(', '.join(
                [args[i].__class__.__name__ for i in range(nArgs)])))
            assert nArgs == nProcess, 'Pass a list of same length then aggregation order'
            try:
                projection = list()
                for i in range(nProcess):
                    # Process components go through the KL result's own
                    # projector; scalar components through the stored one.
                    if isinstance(args[i], (ot.Sample, ot.Field)) and self.__isProcess__[i]:
                        projection.append(list(self.__KLResultsAndDistributions__[i].project(args[i])))
                    else:
                        ELEM = list(self.__KL_projecting__[i](args[i]))
                        projection.append(ELEM)
                # this comprehensive list transforms a list[list[float], ot.Point]
                # into a flat list of floats
                projectionFlat = [item if not isinstance(item, (ot.Point)) else item[0]
                                  for sublist in projection for item in sublist]
                output = ot.PointWithDescription(list(zip(self.__mode_description__, projectionFlat)))
                return output
            except Exception as e:
                raise e
        elif isinstance(args[0], ot.ProcessSample):
            print('projection of a list of {} '.format(', '.join(
                [args[i].__class__.__name__ for i in range(nArgs)])))
            assert nArgs == nProcess, 'Pass a list of same length then aggregation order'
            try:
                # One row per realization, columns = all modes of all processes.
                projectionSample = ot.Sample(0, sum(self.__mode_count__))
                sampleSize = args[0].getSize()
                #print('Process args are:',args)
                projList = []
                for idx in range(nProcess):
                    if self.__isProcess__[idx]:
                        projList.append(self.__KL_projecting__[idx](args[idx]))
                    else:
                        # Scalar component: flatten the process sample into a
                        # plain Sample before projecting.
                        distributionSample = ot.Sample(
                            [args[idx][i][0] for i in range(args[idx].getSize())])
                        projList.append(self.__KL_projecting__[idx](distributionSample))
                for idx in range(sampleSize):
                    # Concatenate the per-process coefficient rows.
                    l = [list(projList[j][idx]) for j in range(nProcess)]
                    projectionSample.add(
                        [item for sublist in l for item in sublist])
                projectionSample.setDescription(self.__mode_description__)
                return projectionSample
            except Exception as e:
                raise e