Example #1
 def at(x, mean=0.0, standardDeviation=1.0):
     '''
     See http://mathworld.wolfram.com/NormalDistribution.html
                     1              -(x-mean)^2 / (2*stdDev^2)
      P(x) = ------------------- * e
            stdDev * sqrt(2*pi)
     '''
     multiplier = 1.0 / (standardDeviation * sqrt(2.0 * PI))
     expPart = exp((-1.0 * square(x - mean)) / (2.0 * square(standardDeviation)))
     result = multiplier * expPart
     return result
Example #2
 def at(x, mean=0.0, standardDeviation=1.0):
     '''
     See http://mathworld.wolfram.com/NormalDistribution.html
                     1              -(x-mean)^2 / (2*stdDev^2)
      P(x) = ------------------- * e
            stdDev * sqrt(2*pi)
         '''
     multiplier = 1.0 / (standardDeviation * sqrt(2.0 * PI))
     expPart = exp(
         (-1.0 * square(x - mean)) / (2.0 * square(standardDeviation)))
     result = multiplier * expPart
     return result
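Examples #1 and #2 are the same routine: `at` evaluates the normal probability density from the formula in the docstring. A quick sanity check of the version above, as a sketch assuming `square` is a plain x*x helper and `PI` comes from the standard math module (the excerpts omit both definitions):

from math import sqrt, exp, pi as PI

def square(x):
    return x * x  # assumed helper; the excerpts above rely on it

# With at() defined as above, the standard normal density peaks at 1/sqrt(2*pi):
assert abs(at(0.0) - 0.3989422804014327) < 1e-12

# ...and its mass over a wide interval is close to 1:
total = sum(at(-5.0 + i * 0.01) * 0.01 for i in range(1001))
assert abs(total - 1.0) < 1e-3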
Example #3
    def inverseErrorFunctionCumulativeTo(p):
        ''' From page 265 of Numerical Recipes '''

        if p >= 2.0:
            return -100

        if p <= 0.0:
            return 100

        if p < 1.0:
            pp = p
        else:
            pp = 2 - p

        t = sqrt(-2 * log(pp / 2.0))  # Initial guess
        x = -0.70711 * ((2.30753 + t * 0.27061) /
                        (1.0 + t * (0.99229 + t * 0.04481)) - t)

        for j in range(2):
            err = GaussianDistribution.errorFunctionCumulativeTo(x) - pp
            x = x + (err / (1.12837916709551257 * exp(-square(x)) - x * err))  # Halley's method step

        if p < 1.0:
            return x
        else:
            return -x
Example #4
    def inverseErrorFunctionCumulativeTo(p):
        ''' From page 265 of Numerical Recipes '''

        if (p >= 2.0):
            return -100

        if (p <= 0.0):
            return 100

        if p < 1.0:
            pp = p
        else:
            pp = 2 - p

        t = sqrt(-2 * log(pp / 2.0))
        # Initial guess
        x = -0.70711 * ((2.30753 + t * 0.27061) /
                        (1.0 + t * (0.99229 + t * 0.04481)) - t)

        for j in range(2):
            err = GaussianDistribution.errorFunctionCumulativeTo(x) - pp
            x = x + (err / (1.12837916709551257 * exp(-square(x)) - x * err))  # Halley's method step

        if p < 1.0:
            return x
        else:
            return -x
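In the surrounding library, `GaussianDistribution.errorFunctionCumulativeTo` is the complementary error function erfc (an assumption here, since the excerpt does not show it), which makes the routine above the Numerical Recipes inverse-erfc: a rational initial guess refined by two Halley iterations, where 1.12837916709551257 is 2/sqrt(pi), the derivative scale of erf. A self-contained round-trip check under that assumption, with math.erfc standing in:

from math import erfc, exp, log, sqrt

def square(x):
    return x * x

def inverse_erfc(p):
    # Same algorithm as above, with math.erfc in place of
    # GaussianDistribution.errorFunctionCumulativeTo
    if p >= 2.0:
        return -100
    if p <= 0.0:
        return 100
    pp = p if p < 1.0 else 2 - p
    t = sqrt(-2 * log(pp / 2.0))  # initial guess
    x = -0.70711 * ((2.30753 + t * 0.27061) /
                    (1.0 + t * (0.99229 + t * 0.04481)) - t)
    for _ in range(2):
        err = erfc(x) - pp
        x = x + err / (1.12837916709551257 * exp(-square(x)) - x * err)
    return x if p < 1.0 else -x

for p in (0.05, 0.5, 1.0, 1.5, 1.95):
    assert abs(erfc(inverse_erfc(p)) - p) < 1e-8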
Example #5
    def logProductNormalization(left, right):

        if (left._precision == 0) or (right._precision == 0):
            return 0

        varianceSum = left._variance + right._variance
        meanDifference = left._mean - right._mean

        logSqrt2Pi = log(sqrt(2 * PI))
        return (-logSqrt2Pi - (log(varianceSum) / 2.0)
                - (square(meanDifference) / (2.0 * varianceSum)))
Example #6
    def logProductNormalization(left, right):

        if ((left._precision == 0) or (right._precision == 0)):
            return 0

        varianceSum = left._variance + right._variance
        meanDifference = left._mean - right._mean

        logSqrt2Pi = log(sqrt(2 * PI))
        return (-logSqrt2Pi - (log(varianceSum) / 2.0)
                - (square(meanDifference) / (2.0 * varianceSum)))
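`logProductNormalization` returns the log of the total mass left over when two Gaussian densities are multiplied: N(x; m1, v1) * N(x; m2, v2) integrates to N(m1; m2, v1 + v2), so log Z = -log sqrt(2*pi) - log(v1 + v2)/2 - (m1 - m2)^2 / (2*(v1 + v2)), which is exactly the expression above. A brute-force numeric check (the tiny `G` class below is only a stand-in exposing the fields the function reads, not the library's own class):

from math import exp, log, pi as PI, sqrt

def square(x):
    return x * x

class G:
    # Stand-in with just the attributes logProductNormalization touches
    def __init__(self, mean, variance):
        self._mean = mean
        self._variance = variance
        self._precision = 1.0 / variance

def pdf(x, mean, variance):
    return exp(-square(x - mean) / (2 * variance)) / sqrt(2 * PI * variance)

left, right = G(2.0, 1.5), G(-1.0, 0.5)
logZ = logProductNormalization(left, right)  # a static method on GaussianDistribution in the library

# Integrate the product of the two densities over a wide interval
total = sum(pdf(x / 100.0, 2.0, 1.5) * pdf(x / 100.0, -1.0, 0.5) * 0.01
            for x in range(-2000, 2001))
assert abs(log(total) - logZ) < 1e-4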
Example #7
    def logRatioNormalization(numerator, denominator):

        if ((numerator._precision == 0) or (denominator._precision == 0)):
            return 0

        varianceDifference = denominator._variance - numerator._variance
        meanDifference = numerator._mean - denominator._mean

        logSqrt2Pi = log(sqrt(2 * PI))

        return (log(denominator._variance) + logSqrt2Pi
                - log(varianceDifference) / 2.0
                + square(meanDifference) / (2 * varianceDifference))
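`logRatioNormalization` is the division counterpart: for v2 > v1 the ratio N(x; m1, v1) / N(x; m2, v2) is still an unnormalized Gaussian, and its total mass satisfies log Z = log v2 + log sqrt(2*pi) - log(v2 - v1)/2 + (m1 - m2)^2 / (2*(v2 - v1)), matching the return expression. The early `return 0` covers zero-precision (uniform) arguments, where no correction applies. A numeric check in the same style as the product sketch above, reusing its `G` and `pdf` helpers:

num, den = G(1.0, 1.0), G(0.0, 3.0)  # needs den._variance > num._variance
logZ = logRatioNormalization(num, den)

# Integrate the ratio of the two densities over a wide interval
total = sum(pdf(x / 100.0, 1.0, 1.0) / pdf(x / 100.0, 0.0, 3.0) * 0.01
            for x in range(-2000, 2001))
assert abs(log(total) - logZ) < 1e-4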
Example #8
    def __init__(self, mean=0.0, standardDeviation=1.0):

        self._mean = mean
        self._standardDeviation = standardDeviation

        # precision and precisionMean are used because they make multiplying
        # and dividing simpler (see the accompanying math paper for more details)
        self._variance = square(standardDeviation)
        self._precision = None
        self._precisionMean = None

        if self._variance != 0.0:
            self._precision = 1 / self._variance
            self._precisionMean = self._precision * self._mean

        else:
            self._precision = INF

            if self._mean == 0.0:
                self._precisionMean = 0.0
            else:
                self._precisionMean = INF
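Caching `_precision` (1/variance) and `_precisionMean` (mean/variance) pays off because multiplying or dividing Gaussians reduces to adding or subtracting those two numbers, and zero variance degenerates cleanly to infinite precision (a point mass). A short usage sketch, assuming `square` is x*x and `INF` is float('inf'), since the excerpt omits both definitions:

from math import inf as INF

def square(x):
    return x * x

g = GaussianDistribution(25.0, 25.0 / 3.0)  # a TrueSkill-style prior
assert g._variance == square(25.0 / 3.0)
assert abs(g._precision * g._variance - 1.0) < 1e-12
assert abs(g._precisionMean - g._mean / g._variance) < 1e-12

point = GaussianDistribution(3.0, 0.0)      # zero variance: a point mass
assert point._precision == INF and point._precisionMean == INF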
Example #9
    def __init__(self, mean=0.0, standardDeviation=1.0):

        self._mean = mean
        self._standardDeviation = standardDeviation

        # precision and precisionMean are used because they make multiplying
        # and dividing simpler (see the accompanying math paper for more details)
        self._variance = square(standardDeviation)
        self._precision = None
        self._precisionMean = None

        if self._variance != 0.0:
            self._precision = 1 / self._variance
            self._precisionMean = self._precision * self._mean

        else:
            self._precision = INF

            if self._mean == 0.0:
                self._precisionMean = 0.0
            else:
                self._precisionMean = INF
Example #10
    def logRatioNormalization(numerator, denominator):

        if (numerator._precision == 0) or (denominator._precision == 0):
            return 0

        varianceDifference = denominator._variance - numerator._variance
        meanDifference = numerator._mean - denominator._mean

        logSqrt2Pi = log(sqrt(2 * PI))

        return (log(denominator._variance) + logSqrt2Pi
                - log(varianceDifference) / 2.0
                + square(meanDifference) / (2 * varianceDifference))