def test_pct2sig():
    """Check pct2sig sigma conversions and that invalid inputs raise."""
    # (percentile, bound, expected sigma)
    good_cases = [
        (0.0013499, StatBound.ONESIDED, -3),
        (0.9973002, StatBound.TWOSIDED, 3),
        (0.0026998, StatBound.TWOSIDED, 3),
    ]
    for p, bound, expected in good_cases:
        assert pct2sig(p=p, bound=bound) == pytest.approx(expected)

    # Percentiles at the open-interval endpoints, or a bad bound, must raise.
    bad_cases = [
        (0, StatBound.TWOSIDED),
        (1, StatBound.TWOSIDED),
        (0.5, None),
    ]
    for p, bound in bad_cases:
        with pytest.raises(ValueError):
            pct2sig(p=p, bound=bound)
Beispiel #2
0
def integration_error(
    nums: list[float],
    dimension: int | None = None,
    volume: float = 1,
    conf: float = 0.95,
    samplemethod: SampleMethod = SampleMethod.RANDOM,
    runningerror: bool = False,
) -> float | list[float]:
    """
    Return the bounding integration error for an array of point estimates.

    If runningerror is False, a single point estimate is returned; if True,
    an array of the running error over the samples is returned instead. When
    volume == 1 the error is a percentage of the true integration volume;
    otherwise it is absolute over the volume.

    Parameters
    ----------
    nums : list[float]
        The integration point estimates across the volume.
    dimension : int | None
        Number of dimensions of the integration volume. Required (> 0) for
        sobol sampling; validated by integration_args_check.
    volume : float
        The integration volume, > 0.
    conf : float
        Confidence level of the calculated error, 0 < conf < 1.
    samplemethod : monaco.mc_enums.SampleMethod
        Either 'random' (default) or 'sobol'.
    runningerror : bool
        If True, return the running error over all estimates.

    Returns
    -------
    error : float | list[float]
        Point estimate of the error, or the running error array.
    """
    integration_args_check(error=None,
                           volume=volume,
                           stdev=None,
                           conf=conf,
                           samplemethod=samplemethod,
                           dimension=dimension)

    n = len(nums)
    if n == 1:
        # A single sample carries no variance info; bound by the volume.
        error1sig = volume

    elif not runningerror:
        stdev = np.std(nums, ddof=1)
        if samplemethod == SampleMethod.RANDOM:
            error1sig = volume * stdev / np.sqrt(n)
        elif samplemethod == SampleMethod.SOBOL:
            error1sig = volume * stdev * np.log(n)**dimension / n

    else:
        # Use Welford's algorithm to calculate the running variance
        M = np.zeros(n)  # Running mean
        S = np.zeros(n)  # Sum of variances
        M[0] = nums[0]
        for i in range(1, n):
            M[i] = M[i - 1] + (nums[i] - M[i - 1]) / (i + 1)
            S[i] = S[i - 1] + (nums[i] - M[i - 1]) * (nums[i] - M[i])
        variances = np.zeros(n)
        variances[1:] = S[1:] / np.arange(1, n)
        stdevs = np.sqrt(variances)

        error1sig_random = volume * np.sqrt(
            (2**(-1 * dimension) - 3**(-1 * dimension)) / np.arange(1, n + 1))
        if samplemethod == SampleMethod.RANDOM:
            error1sig = error1sig_random
        elif samplemethod == SampleMethod.SOBOL:
            error1sig_sobol = volume * stdevs * np.log(np.arange(
                1, n + 1))**dimension / np.arange(1, n + 1)
            error1sig = error1sig_sobol

        # Leading zeros will throw off plots, fill with reasonable dummy data.
        # np.max is used (rather than builtin max) since error1sig is a numpy
        # array here.
        error1sig[error1sig == 0] = np.max(error1sig)

    error = error1sig * pct2sig(conf)
    return error
Beispiel #3
0
    def genStatsGaussianP(self) -> None:
        """
        Get the value of the variable at the inputted percentile value,
        assuming a gaussian distribution.

        Requires the 'p' kwarg in self.statkwargs; 'bound' is optional and
        defaults to a two-sided interval.

        Raises
        ------
        ValueError
            If the required 'p' kwarg was not supplied.
        """
        if 'p' not in self.statkwargs:
            # Fixed quoting: the original adjacent-string concatenation
            # rendered the message without quotes around p.
            raise ValueError(f"{self.stat} requires the kwarg 'p'")
        if 'bound' not in self.statkwargs:
            self.bound = StatBound.TWOSIDED
        else:
            self.bound = self.statkwargs['bound']

        self.p = self.statkwargs['p']
        self.sig = pct2sig(self.p, bound=self.bound)
        # Fixed typo in the generated stat name: 'Guassian' -> 'Gaussian'.
        self.setName(f'{self.var.name} Gaussian {self.p*100}%')
        self.genStatsFunction(self.sigma, {'sig': self.sig})
def integration_n_from_err(
    error: float,
    dimension: int,
    volume: float,
    stdev: float,
    conf: float = 0.95,
    samplemethod: SampleMethod = SampleMethod.RANDOM,
) -> int:
    """
    Returns the number of sample points required to meet a target integration
    error at the given confidence level.
    We generally do not know a-priori what the standard deviation will be, so
    best practice is to set to the max range of values on the interval, and
    then calculate a better stdev on a lower number of cases, which can then
    be substituted in here to bootleg a more efficient computation.
    For sobol sampling, remember to round n to the next power of 2 for balance.
    monaco.helper_functions.next_power_of_2(n) can help with this.

    Parameters
    ----------
    error : float
        The target error.
    dimension : int
        The number of dimensions in the integration volume, dimension > 0.
    volume : float
        The integration volume, > 0. If volume == 1 (default), then the target
        error is a percentage of the true integration volume.
    stdev : float
        The standard deviation of the integration estimates, stdev > 0. We
        generally do not know this a-priori, so use
        monaco.integration_statistics.max_stdev to calculate this in that
        instance. Or, do a limited number of cases to estimate this before
        performing the full run.
    conf : float
        Confidence level of the calculated error. This must be 0 < conf < 1,
        and should be 0.5 < conf < 1.
    samplemethod : monaco.mc_enums.SampleMethod
        Monte Carlo sample method. Either 'random' (default and bounding), or
        'sobol'. If using a different sample method, use 'random' here.

    Returns
    -------
    n : int
        The number of sample points required to meet the target integration
        error.
    """
    integration_args_check(error=error,
                           dimension=dimension,
                           volume=volume,
                           stdev=stdev,
                           conf=conf,
                           samplemethod=samplemethod)

    # Random sampling: error ~ volume * sigma * stdev / sqrt(n), solved for n.
    n_random = (volume * pct2sig(conf) * stdev / error)**2
    if samplemethod == SampleMethod.RANDOM:
        n = n_random
    elif samplemethod == SampleMethod.SOBOL:

        # Sobol error ~ volume * stdev * log(n)^d / n cannot be inverted in
        # closed form, so find the root numerically.
        def f(n: float) -> float:
            return volume * stdev * pct2sig(conf) * np.log(
                n)**dimension / n - error

        try:
            rootResults = root_scalar(f,
                                      method='brentq',
                                      bracket=[2**8, 2**31 - 1],
                                      xtol=0.1,
                                      maxiter=int(1e3))
            n_sobol = rootResults.root
            n = np.min([n_random, n_sobol])
        except Exception:
            # For higher than 3 dimensions, reaching n may be difficult, and
            # will be much larger than n_random anyways
            # warn(f'Cannot reach error tolerance of ±{error}. ' +
            #      f'Falling back to samplemethod={SampleMethod.RANDOM}')
            n = n_random

    n = int(np.ceil(n))
    return n
 # NOTE(review): this looks like a stray duplicate of the inner helper
 # defined inside integration_n_from_err above — presumably scrape/paste
 # residue. The names volume, stdev, conf, dimension, and error are unbound
 # at this scope, and the 1-space indent is invalid at module level; this
 # fragment should likely be deleted — TODO confirm against the original
 # source file.
 def f(n: float) -> float:
     return volume * stdev * pct2sig(conf) * np.log(
         n)**dimension / n - error
def integration_error(
    nums: np.ndarray,
    dimension: int,
    volume: float = 1,
    conf: float = 0.95,
    samplemethod: SampleMethod = SampleMethod.RANDOM,
    runningerror: bool = False,
) -> float | np.ndarray:
    """
    Compute the bounding integration error for an array of point estimates.

    With runningerror False a single float estimate is returned; with
    runningerror True a numpy array of the running error over the samples is
    returned, which demonstrates convergence. When volume == 1 the error is a
    percent error; otherwise it is absolute over the integration volume.

    Parameters
    ----------
    nums : list[float]
        The integration point estimates across the volume.
    dimension : int
        The number of dimensions in the integration volume, dimension > 0.
    volume : float
        The integration volume, > 0. A volume of 1 (default) makes the
        returned error a percentage of the true integration volume.
    conf : float
        Confidence level of the calculated error, 0 < conf < 1 (and ideally
        0.5 < conf < 1).
    samplemethod : monaco.mc_enums.SampleMethod
        Either 'random' (default and bounding) or 'sobol'. Use 'random' for
        any other sampling scheme.
    runningerror : bool
        False returns a point estimate; True returns the running error array.

    Returns
    -------
    error : float | np.ndarray
        Point estimate of the error, or the running error array.
    """
    integration_args_check(error=None,
                           dimension=dimension,
                           volume=volume,
                           stdev=None,
                           conf=conf,
                           samplemethod=samplemethod)

    n = len(nums)
    sample_counts = np.arange(1, n + 1)

    if n == 1:
        # One sample carries no variance information; bound by the volume.
        error1sig = np.array(volume)

    elif not runningerror:
        sigma = np.std(nums, ddof=1)
        bound_random = volume * np.sqrt(
            (2**(-1 * dimension) - 3**(-1 * dimension)) / n)
        if samplemethod == SampleMethod.RANDOM:
            error1sig = bound_random
        elif samplemethod == SampleMethod.SOBOL:
            bound_sobol = volume * sigma * np.log(n)**dimension / n
            error1sig = np.minimum(bound_random, bound_sobol)

    else:
        # Welford's algorithm: running mean and running variance in one pass.
        means = np.zeros(n)
        sumsq = np.zeros(n)  # running sum of squared deviations
        means[0] = nums[0]
        for i in range(1, n):
            delta = nums[i] - means[i - 1]
            means[i] = means[i - 1] + delta / (i + 1)
            sumsq[i] = sumsq[i - 1] + delta * (nums[i] - means[i])
        running_var = np.zeros(n)
        running_var[1:] = sumsq[1:] / np.arange(1, n)
        running_std = np.sqrt(running_var)

        bound_random = volume * np.sqrt(
            (2**(-1 * dimension) - 3**(-1 * dimension)) / sample_counts)
        if samplemethod == SampleMethod.RANDOM:
            error1sig = bound_random
        elif samplemethod == SampleMethod.SOBOL:
            bound_sobol = (volume * running_std
                           * np.log(sample_counts)**dimension / sample_counts)
            error1sig = np.minimum(bound_random, bound_sobol)

        # Leading zeros would throw off plots; backfill with the max error.
        error1sig[error1sig == 0] = np.max(error1sig)

    error: float | np.ndarray = error1sig * pct2sig(conf)
    return error