Example #1
  def __init__(self):
    """
      Default Constructor that will initialize member variables with reasonable
      defaults or empty lists/dictionaries where applicable.
      @ In, None
      @ Out, None
    """
    AdaptiveSampler.__init__(self)
    self._initialValues = {}      # dict of the user-provided initial values, i.e. {var: val}
    self._updateValues = {}       # dict of input variable values for the current MCMC iteration, i.e. {var: val}
    self._proposal = {}           # dict of the proposal distributions for input variables, i.e. {var: dist}
    self._priorFuns = {}          # dict of the prior functions for input variables, i.e. {var: fun}
    self._burnIn = 0              # integer indicating how many samples will be discarded
    self._likelihood = None       # stores the output of the likelihood
    self._logLikelihood = False   # True if the user-provided likelihood is in log format
    self._availProposal = {'normal': Distributions.Normal(0.0, 1.0),
                           'uniform': Distributions.Uniform(-1.0, 1.0)}  # available proposal distributions
    self._acceptDist = Distributions.Uniform(0.0, 1.0)  # uniform distribution used for the accept/reject step
    self.toBeCalibrated = {}      # parameters that will be calibrated
    # assembler objects
    self.addAssemblerObject('proposal', InputData.Quantity.zero_to_infinity)
    self.addAssemblerObject('probabilityFunction', InputData.Quantity.zero_to_infinity)
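
For orientation, here is a minimal sketch of how the accept/reject distribution initialized above could enter a Metropolis-style acceptance test. The method name _acceptOrReject, the acceptanceRatio argument, and the rvs() sampling call are illustrative assumptions, not taken from the snippet.

  def _acceptOrReject(self, acceptanceRatio):
    """
      Sketch only: Metropolis acceptance test built on self._acceptDist.
      @ In, acceptanceRatio, float, ratio of proposed-to-current posterior values
      @ Out, accepted, bool, True if the proposed sample should be accepted
    """
    u = self._acceptDist.rvs()  # assumed sampling call: draw u ~ Uniform(0, 1)
    return u < min(1.0, acceptanceRatio)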
Example #2
  def transformDistDict(self):
    """
      Performs distribution transformation.
      If the 'pca' method is used in the variables transformation (i.e. latentVariables to manifestVariables),
      the correlated variables will be transformed into uncorrelated variables with standard normal
      distributions. Thus, the dictionary of distributions will also be transformed.
      @ In, None
      @ Out, distDicts, dict, distribution dictionary {varName: DistributionObject}
    """
    # Generate a standard normal distribution; it is used to generate the sparse grid points
    # and weights for the multivariate normal distribution when PCA is used.
    standardNormal = Distributions.Normal()
    standardNormal.messageHandler = self.messageHandler
    standardNormal.mean = 0.0
    standardNormal.sigma = 1.0
    standardNormal.initializeDistribution()
    distDicts = {}
    for varName in self.variables2distributionsMapping.keys():
      distDicts[varName] = self.distDict[varName]
    if self.variablesTransformationDict:
      for key, varsDict in self.variablesTransformationDict.items():
        if self.transformationMethod[key] == 'pca':
          listVars = varsDict['latentVariables']
          for var in listVars:
            distDicts[var] = standardNormal
    return distDicts
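
To make the effect concrete, a small illustrative sketch of the mapping this method returns. The sampler object and the transformation entries below are hypothetical; only the attribute names come from the method above.

# hypothetical setup: one PCA transformation with two latent variables
#   sampler.variablesTransformationDict = {'pcaSet': {'latentVariables': ['z1', 'z2']}}
#   sampler.transformationMethod        = {'pcaSet': 'pca'}
distDicts = sampler.transformDistDict()
# every latent variable now maps to the same standard normal object
assert distDicts['z1'].mean == 0.0 and distDicts['z1'].sigma == 1.0
assert distDicts['z1'] is distDicts['z2']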
Example #3
  def makeDistribution(self):
    """
      Used to make standardized distribution for this poly type.
      @ In, None
      @ Out, normal, Distribution, the normal distribution
    """
    normal = Distributions.Normal(0.0, 1.0)
    normal.initializeDistribution()
    return normal
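
A brief usage sketch of the distribution returned above. The owner object quad is hypothetical, and the pdf/cdf accessors are assumed to be part of the Distributions API rather than shown in this snippet.

# usage sketch; 'quad' is a hypothetical object owning makeDistribution
dist = quad.makeDistribution()
print(dist.pdf(0.0))  # density of N(0, 1) at 0, roughly 0.3989
print(dist.cdf(0.0))  # CDF of N(0, 1) at 0, i.e. 0.5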
Example #4
  def makeDistribution(self):
    """
      Used to make standardized distribution for this poly type.
      @ In, None
      @ Out, normal, Distribution, the normal distribution
    """
    normalElement = ET.Element("Normal")
    element = ET.Element("mean", {})
    element.text = "0"
    normalElement.append(element)
    element = ET.Element("sigma", {})
    element.text = "1"
    normalElement.append(element)
    normal = Distributions.Normal()
    normal._readMoreXML(normalElement)
    normal.initializeDistribution()
    return normal
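
Examples #3 and #4 build the same standard normal through two different paths (constructor arguments vs. an XML node). A quick sanity check, assuming _readMoreXML stores the parsed values on the mean/sigma attributes seen in Example #2; quad is again a hypothetical owner object.

# assumed check: both construction paths yield N(0, 1)
normal = quad.makeDistribution()
assert float(normal.mean) == 0.0 and float(normal.sigma) == 1.0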
Example #5
## Should these be checked?
initParams = puniform.getInitParams()  # 'puniform' is presumably the unpickled uniform from the earlier, not shown, part of this test

for _ in range(10):
    Distributions.randomIntegers(0, 1, uniform)  # draw random integers in [0, 1]

Distributions.randomIntegers(2, 1, uniform)  # note: lower bound exceeds upper bound here

#Test Normal
mean = 1.0
sigma = 2.0
normalElement = ET.Element("Normal")
normalElement.append(createElement("mean", text="%f" % mean))
normalElement.append(createElement("sigma", text="%f" % sigma))

normal = Distributions.Normal()
normal._readMoreXML(normalElement)
normal.initializeDistribution()

#check pickled version as well
pk.dump(normal, open('testDistrDump.pk', 'wb'))
pnormal = pk.load(open('testDistrDump.pk', 'rb'))

checkCrowDist("normal", normal, {
    'mu': 1.0,
    'sigma': 2.0,
    'type': 'NormalDistribution'
})
checkCrowDist("pnormal", pnormal, {
    'mu': 1.0,
    'sigma': 2.0,
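
checkCrowDist itself is not shown either; a hedged sketch of what it presumably does, assuming the distribution objects expose a getCrowDistDict() accessor for their Crow-side parameters:

def checkCrowDist(comment, dist, expectedCrowDist):
    """
      Sketch only: compares the distribution's Crow parameter dictionary against
      the expected one and reports any mismatch.
    """
    crowDist = dist.getCrowDistDict()  # assumed accessor
    if crowDist != expectedCrowDist:
        print("checkCrowDist mismatch for", comment, ":", crowDist, "!=", expectedCrowDist)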
Example #6
    simple, simple_prob, 12, True, "linear")
print(stats)
print(cdf(0.0), cdf(32.0), cdf(64.0))
assert 0.4 < cdf(32.0) < 0.6

low, high = Metrics.MetricUtilities._getBounds({
    "low": 1.0,
    "high": 3.0
}, {
    "low": 2.0,
    "high": 2.5
})
assert low == 1.0
assert high == 3.0

dist1 = Distributions.Normal(0.0, 1.0)
dist1.initializeDistribution()

dist2 = Distributions.Normal(1.0, 1.0)
dist2.initializeDistribution()

#Test same
cdfAreaDifference = Metrics.MetricUtilities._getCDFAreaDifference(dist1, dist1)

print("cdfAreaDifference same", cdfAreaDifference)
assert -1e-3 < cdfAreaDifference < 1e-3

pdfCommonArea = Metrics.MetricUtilities._getPDFCommonArea(dist1, dist1)

print("pdfCommonArea same", pdfCommonArea)
assert 0.99 < pdfCommonArea < 1.01