def optimalHierarchy(self, inputDict):
    """
    Compute the best hierarchy by individually computing the best index set
    and the best number of samples.
    """
    inputDict["tolerances"] = [self.tolerance]
    inputDict = self.toleranceSplitting(inputDict)
    new_indices = self.optimalIndexSet(inputDict)
    # Enforce index space bounds.
    # The second condition is necessary until MC indices are fixed.
    if self.indexSpace and new_indices[0]:
        indices = [i for i in new_indices if i[0] <= max(self.indexSpace)]
    else:
        indices = new_indices
    # TODO - Think of a better way to do the following. Very fragile.
    if self.isVarianceBlended and inputDict["parametersForModel"][0]:
        inputDict["blendedVariances"] = self.varianceBlender.blend(indices, inputDict)
    elif "estimations" in inputDict and isinstance(inputDict["estimations"], list):
        inputDict["variancesForHierarchy"] = inputDict["estimations"][-1]
    sample_numbers = self.optimalSampleNumbers(inputDict, indices)
    # Enforce the minimal number of samples per index
    sample_numbers = [
        max(inputDict["minimalSamplesPerIndex"], n) for n in sample_numbers
    ]
    old_hierarchy = inputDict["oldHierarchy"]
    if len(old_hierarchy[0]) == 0:
        new_hierarchy = [indices, sample_numbers]
    else:
        new_hierarchy = mergeTwoListsIntoOne(indices, sample_numbers)
    return new_hierarchy
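# The method above relies on the list utility mergeTwoListsIntoOne, and
# updatePredictors below relies on its counterpart splitOneListIntoTwo. The
# following is a minimal sketch of implementations consistent with how they
# are called here; these are assumptions based on the call sites, not
# necessarily the actual tools-module code.
def mergeTwoListsIntoOne(listOne, listTwo):
    # Pair entries position-wise: [[listOne[0], listTwo[0]], ...].
    # Used above to build hierarchy entries [index, number_of_samples].
    return [[listOne[i], listTwo[i]] for i in range(len(listOne))]

def splitOneListIntoTwo(pairedList):
    # Inverse of mergeTwoListsIntoOne: split [[a, b], ...] back into
    # [[a, ...], [b, ...]], e.g. a hierarchy into indices and sample numbers.
    first = [entry[0] for entry in pairedList]
    second = [entry[1] for entry in pairedList]
    return [first, second]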
def updatePredictors(self, predictorCoordinates=None):
    """
    For each entry of qoiPredictor, retrieve all index estimations from the
    corresponding entry of estimatorsForPredictor and pass them to the update
    method of qoiPredictor. If self.isCostUpdated is True, do the same for
    costPredictor as well.
    """
    # TODO - The implicit assumption here is that there is a one-to-one map
    # between index-wise estimations and the model built upon each estimation.
    # We have not yet allowed for a mechanism similar to the assembler
    # mechanism but at the index-wise quantities. Should we construct such a
    # mechanism?
    if not self.qoiPredictor:
        return
    # If nothing is specified, update all qoiPredictors
    if predictorCoordinates is None:
        predictorCoordinates = range(len(self.qoiPredictor))
    # Extract the current hierarchy and split it into indices and sample numbers
    hierarchy = self.hierarchy()
    [indices, number_samples] = splitOneListIntoTwo(hierarchy)
    # Retrieve all index estimations corresponding to each entry of predictorCoordinates
    for i in predictorCoordinates:
        # Extract qoiEstimator coordinates and value arguments from estimatorsForPredictor
        qoi_estimator_coordinate = self.estimatorsForPredictor[i][0]
        qoi_estimator_value_arguments = self.estimatorsForPredictor[i][1]
        value_transform_function = self.estimatorsForPredictor[i][2]
        # Assemble this quantity across all indices and apply the transformation
        estimations = self.indexEstimation(
            qoi_estimator_coordinate, qoi_estimator_value_arguments
        )
        # TODO - qoiEstimator.value returns Var(Y_l)/N_l, whereas the model is
        # fit on Var(Y_l). The following if-else is a hack to multiply by the
        # number of samples. It is here temporarily until a better mechanism
        # can be found. Refer to the TODO above.
        if qoi_estimator_value_arguments[-1] is False:
            estimationsForPredictors = [
                value_transform_function(e) for e in estimations
            ]
        else:
            estimationsForPredictors = [
                value_transform_function(e, number_samples[j])
                for j, e in enumerate(estimations)
            ]
        # Extract only the indices for which all multi-index components are non-zero
        data = []
        for j in range(len(indices)):
            index = indices[j]
            if all(component > 0 for component in index):
                data.append([index, estimationsForPredictors[j]])
        # Run the update method of qoiPredictor on these index estimations
        self.qoiPredictor[i].update(data)
    # Retrieve all index cost estimations and pass them to costPredictor.
    # The cost predictor is updated on all indices, so no filtering is applied
    # here (a dead filtering block that was immediately overwritten has been
    # removed; behavior is unchanged).
    if self.isCostUpdated:
        estimationsForPredictors = self.indexCostEstimation(self.costEstimatorForPredictor)
        data = mergeTwoListsIntoOne(indices, estimationsForPredictors)
        self.costPredictor.update(data)
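# The Var(Y_l)/N_l hack above is easiest to see with a concrete transform.
# The following sketch is illustrative only: the two function names are
# assumptions standing in for whatever callables the third entry of
# estimatorsForPredictor actually holds, matching the two call signatures
# used in updatePredictors.
def passThrough(estimation):
    # One-argument form: the estimator already returns the quantity the
    # predictor is fit on, so no rescaling is needed.
    return estimation

def rescaleBySampleNumber(estimation, numberOfSamples):
    # Two-argument form: qoiEstimator.value returns Var(Y_l)/N_l, while the
    # predictor is fit on Var(Y_l), so multiply back by N_l.
    return estimation * numberOfSamples

# Example: an index-wise estimation of 0.02 obtained from 50 samples
print(passThrough(0.02))                # 0.02 (already the fitted quantity)
print(rescaleBySampleNumber(0.02, 50))  # 1.0  (recovers Var(Y_l))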