def errorEstimationTerr(cdf_value, *args):
    """Return the total error estimate given packed moment/variance estimations.

    The total error is the bias estimation plus the statistical error, i.e.
    the confidence factor (inverse normal CDF of the requested quantile)
    times the square root of the variance estimation.
    """
    estimations = packedList(args)
    bias_term = estimations[0]
    # Scale the statistical contribution by the requested confidence level
    confidence_factor = normalInverseCDF(cdf_value[0])
    statistical_term = confidence_factor * np.sqrt(estimations[1])
    return bias_term + statistical_term
def minimalCostStatisticalError(inputDict, newLevels):
    """Compute sample numbers to satisfy tolerance on statistical error with minimal cost."""
    # --- Per-level variances ---
    variances = inputDict["blendedVariances"]
    if variances is None:
        # Variance was not blended: derive it from a model or from sample estimations
        hierarchy = inputDict["oldHierarchy"]
        if inputDict["parametersForModel"] is not None:
            # Valid model available: evaluate it on every requested level
            model_params = inputDict["parametersForModel"][1]
            model_fn = inputDict["models"][1]
            variances = [model_fn(model_params, lvl) for lvl in newLevels]
            # The coarsest level keeps its sample-based estimation
            variances[0] = inputDict["estimations"][-1][0] * hierarchy[0][1]
        else:
            # No model: fall back to sample estimations scaled by sample counts
            variances = [
                est * node[1]
                for est, node in zip(inputDict["estimations"][-1], hierarchy)
            ]

    # --- Per-level costs ---
    cost_parameters = inputDict["costParameters"]
    if cost_parameters is not None:
        # Valid cost model available
        cost_fn = inputDict["costModel"]
        costs = [cost_fn(cost_parameters, lvl) for lvl in newLevels]
        # The coarsest level keeps its sample-based cost estimation
        costs[0] = inputDict["costEstimations"][0]
    else:
        # No model: fall back to sample estimations
        costs = inputDict["costEstimations"]

    # sum_{k=0}^L sqrt(C_k V_k), needed by the optimal-sample-number formula
    root_cost_variance_terms = [
        np.sqrt(costs[k] * variances[k]) for k in range(len(costs))
    ]
    cost_variance_sum = sum(root_cost_variance_terms)

    # Level-independent prefactor (confidence over tolerance, squared)
    confidence = normalInverseCDF(inputDict["errorParameters"][0][0])
    tolerance = inputDict["tolerances"][0] * inputDict["splittingParameter"]
    constant_factor = (confidence / tolerance) ** 2 * cost_variance_sum

    # --- Optimal number of samples per level ---
    new_samples = []
    for level_index, _ in enumerate(newLevels):
        if level_index >= len(costs):
            # No data for this level: set the sample number to zero.
            # The hierarchy optimiser enforces the minimal number of samples per level.
            new_samples.append(0)
        else:
            optimal = constant_factor * np.sqrt(
                variances[level_index] / costs[level_index]
            )
            new_samples.append(int(np.ceil(optimal)))
    return new_samples
def errorEstimationStatError(cdfValue, globalEstimations):
    """Return the statistical error.

    Accept the summation over the variances divided by number of samples
    over all levels and return its square root, scaled by the confidence
    factor, as the statistical error.
    """
    if cdfValue is None:
        # Default behavior: no confidence scaling
        confidence_factor = 1
    else:
        # [Pisaroni et al.,CMLMC,pag.25]
        confidence_factor = normalInverseCDF(cdfValue[0])
    # float() ensures no NumPy scalar type leaks out
    return float(confidence_factor * np.sqrt(globalEstimations[0]))
def test_normalInverseCDF(self):
    """Check normalInverseCDF against reference values on a grid of probabilities.

    Reference values are Phi^{-1}(p) for p = linspace(0.01, 0.99, 10).
    """
    correct_inverse_cdf_values = [
        -2.3263478740408408, -1.180559456612439, -0.7461851862161866,
        -0.4215776353171568, -0.13689839042801627, 0.13689839042801613,
        0.4215776353171568, 0.7461851862161862, 1.180559456612439,
        2.3263478740408408
    ]
    cdf_values = np.linspace(0.01, 0.99, num=10)
    inverse_cdf_values = [
        tools.normalInverseCDF(value) for value in cdf_values
    ]
    # Compare with a tolerance: exact float equality (assertEqual) is brittle,
    # since last-bit results may differ across platforms/library versions.
    for computed, expected in zip(inverse_cdf_values, correct_inverse_cdf_values):
        self.assertAlmostEqual(computed, expected, places=12)
def errorEstimationStatError(ignore, *args):
    """Return the statistical error from packed estimations.

    Accept the summation over the variances divided by number of samples
    over all levels and return its square root, scaled by the confidence
    factor, as the statistical error.
    """
    estimations = packedList(args)
    # TODO - Think of better place for this assertion
    assert len(estimations) == 1, (
        "length of assembledEstimationsList passed to errorEstimationStatError is not 1"
    )
    if ignore is None:
        # Default behavior: no confidence scaling
        confidence_factor = 1
    else:
        # [Pisaroni et al.,CMLMC,pag.25]
        confidence_factor = normalInverseCDF(ignore[0])
    return confidence_factor * np.sqrt(estimations[0])
def setUp(self):
    """Draw a random test problem and size the sample count for it."""
    # Random problem dimension and per-component moments
    # (RNG calls must stay in this order for reproducibility)
    self.variableDimension = int(self._randomGenerator.uniform(1, 11, 1))
    self.mean = self._randomGenerator.uniform(-10, 10, self.variableDimension)
    self.variance = self._randomGenerator.uniform(0.1, 0.5, self.variableDimension)
    # Accepted probability that estimated expectation does not satisfy tolerance
    p_fail = 10**-4
    # Standard deviations needed to hit that failure probability at self.tolerance
    required_std = normalInverseCDF(1 - p_fail / 2) / self.tolerance
    # Number of samples achieving the failure probability (worst component)
    base_samples = int(np.max(self.variance * required_std**2))
    # Safety factor for higher-order moments
    squared_samples = base_samples**2
    # Clamp to safety bounds [1e3, 1e6]
    self.numberOfSamples = min(max(squared_samples, 10**3), 10**6)