Example #1
  def initialStepSize(self, numOptVars=None, scaling=0.05, **kwargs):
    """
      Provides an initial step size
      @ In, numOptVars, int, number of optimization variables
      @ In, scaling, float, optional, scaling factor
      @ In, kwargs, dict, optional, additional keyword arguments (unused)
      @ Out, initialStepSize, float, the initial step size
    """
    return mathUtils.hyperdiagonal(np.ones(numOptVars) * scaling)
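For reference, mathUtils.hyperdiagonal computes the diagonal length of a hyperrectangle from its side lengths, i.e. the Euclidean norm of the side-length vector; the checks in Example #4 below (e.g. a 3-by-4 rectangle has diagonal 5) are consistent with that. A minimal sketch of the behavior, not the RAVEN implementation itself:

import numpy as np

def hyperdiagonal(sideLengths):
  """
    Diagonal of a hyperrectangle with the given side lengths.
    @ In, sideLengths, array-like, side lengths of the hyperrectangle
    @ Out, hyperdiagonal, float, Euclidean length of the diagonal
  """
  return float(np.sqrt(np.sum(np.square(sideLengths))))

assert hyperdiagonal([3, 4]) == 5.0  # matches the 2D check in Example #4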
Example #2
    def localInputAndChecks(self, xmlNode):
        """
      Local method for additional reading.
      @ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
      @ Out, None
    """
        GradientBasedOptimizer.localInputAndChecks(self, xmlNode)
        self.paramDict['alpha'] = float(self.paramDict.get('alpha', 0.602))
        self.paramDict['gamma'] = float(self.paramDict.get('gamma', 0.101))
        self.paramDict['A'] = float(
            self.paramDict.get('A', self.limit['mdlEval'] / 10.))
        self.paramDict['a'] = self.paramDict.get('a', None)
        self.paramDict['c'] = float(self.paramDict.get('c', 0.005))
        #FIXME the optimization parameters should probably all operate ONLY on normalized data!
        #  -> perhaps the whole optimizer should only work on normalized data.

        #FIXME normalizing doesn't seem to have the desired effect, currently; it makes the step size very small (for large scales)
        #if "a" was defaulted, use the average scale of the input space.
        #This is the suggested value from the paper, missing a 1/gradient term since we don't know it yet.
        if self.paramDict['a'] is None:
            self.paramDict['a'] = mathUtils.hyperdiagonal(
                np.ones(len(
                    self.getOptVars())))  # the features are always normalized
            self.raiseAMessage('Defaulting "a" gradient parameter to',
                               self.paramDict['a'])
        else:
            self.paramDict['a'] = float(self.paramDict['a'])

        self.constraintHandlingPara['innerBisectionThreshold'] = float(
            self.paramDict.get('innerBisectionThreshold', 1e-2))
        self.constraintHandlingPara['innerLoopLimit'] = float(
            self.paramDict.get('innerLoopLimit', 1000))

        self.gradDict['pertNeeded'] = self.gradDict['numIterForAve'] * 2

        stochDist = self.paramDict.get('stochasticDistribution', 'Hypersphere')
        if stochDist == 'Bernoulli':
            self.stochasticDistribution = Distributions.returnInstance(
                'Bernoulli', self)
            self.stochasticDistribution.p = 0.5
            self.stochasticDistribution.initializeDistribution()
            # Initialize the Bernoulli distribution for random perturbations. Add artificial noise to keep specular loss functions from yielding false-positive convergence.
            # FIXME there has to be a better way to get two random numbers
            self.stochasticEngine = lambda: [
                (0.5 + randomUtils.random() * (1. + randomUtils.random(
                ) / 1000. * randomUtils.randomIntegers(-1, 1, self)))
                if self.stochasticDistribution.rvs() == 1 else -1. *
                (0.5 + randomUtils.random() * (1. + randomUtils.random(
                ) / 1000. * randomUtils.randomIntegers(-1, 1, self)))
                for _ in range(len(self.getOptVars()))
            ]
        elif stochDist == 'Hypersphere':
            self.stochasticEngine = lambda: randomUtils.randPointsOnHypersphere(
                len(self.getOptVars()))
        else:
            self.raiseAnError(
                IOError, stochDist +
                ' is currently not supported for SPSA')
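The defaults read above (alpha = 0.602, gamma = 0.101, together with A, a, and c) match Spall's standard guidelines for SPSA gain sequences. Assuming the usual form of those sequences (the update step itself is not shown in this excerpt), the step size a_k and perturbation size c_k at iteration k would decay as in this illustrative sketch:

# Hypothetical sketch of the standard SPSA gain sequences (Spall's form);
# not code from this optimizer.
def spsaGains(k, a, c, A, alpha=0.602, gamma=0.101):
  ak = a / (A + k + 1.) ** alpha  # step size for the gradient update
  ck = c / (k + 1.) ** gamma      # magnitude of the random perturbation
  return ak, ck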
Example #3
  def localInputAndChecks(self, xmlNode, paramInput):
    """
      Local method for additional reading.
      @ In, xmlNode, xml.etree.ElementTree.Element, Xml element node
      @ In, paramInput, InputData.ParameterInput, the parsed parameters
      @ Out, None
    """
    GradientBasedOptimizer.localInputAndChecks(self, xmlNode, paramInput)
    self.currentDirection   = None
    numValues = self._numberOfSamples()
    # set the initial step size
    ## use the diagonal of a hypercube whose side length equals the user-provided initialStepSize factor
    stepPercent = float(self.paramDict.get('initialStepSize', 0.05))
    self.paramDict['initialStepSize'] = mathUtils.hyperdiagonal(np.ones(numValues)*stepPercent)
    self.raiseADebug('Based on initial step size factor of "{:1.5e}", initial step size is "{:1.5e}"'
                         .format(stepPercent, self.paramDict['initialStepSize']))
    # set the perturbation distance
    ## if not given, default to 1% of the step size
    self.paramDict['pertDist'] = float(self.paramDict.get('perturbationDistance', 0.01))
    self.raiseADebug('Perturbation distance is "{:1.5e}" times the step size'
                         .format(self.paramDict['pertDist']))

    self.constraintHandlingPara['innerBisectionThreshold'] = float(self.paramDict.get('innerBisectionThreshold', 1e-2))
    if not 0 < self.constraintHandlingPara['innerBisectionThreshold'] < 1:
      self.raiseAnError(IOError,'innerBisectionThreshold must be between 0 and 1; got',self.constraintHandlingPara['innerBisectionThreshold'])
    self.constraintHandlingPara['innerLoopLimit'] = float(self.paramDict.get('innerLoopLimit', 1000))

    self.gradDict['pertNeeded'] = self.gradDict['numIterForAve'] * (self.paramDict['pertSingleGrad']+1)

    # determine the number of independent variables (scalar and vectors included)
    stochDist = self.paramDict.get('stochasticDistribution', 'Hypersphere')
    if stochDist == 'Bernoulli':
      self.stochasticDistribution = Distributions.returnInstance('Bernoulli',self)
      self.stochasticDistribution.p = 0.5
      self.stochasticDistribution.initializeDistribution()
      # Initialize the Bernoulli distribution for random perturbations. Add artificial noise to keep specular loss functions from yielding false-positive convergence.
      # FIXME there has to be a better way to get two random numbers
      self.stochasticEngine = lambda: [(0.5+randomUtils.random()*(1.+randomUtils.random()/1000.*randomUtils.randomIntegers(-1, 1, self))) if self.stochasticDistribution.rvs() == 1 else
                                   -1.*(0.5+randomUtils.random()*(1.+randomUtils.random()/1000.*randomUtils.randomIntegers(-1, 1, self))) for _ in range(numValues)]
    elif stochDist == 'Hypersphere':
      # TODO ensure you can't get a "0" along any dimension! Needs to be > 1e-15. Right now it's just highly unlikely.
      self.stochasticEngine = lambda: randomUtils.randPointsOnHypersphere(numValues) if numValues > 1 else [randomUtils.randPointsOnHypersphere(numValues)]
    else:
      self.raiseAnError(IOError, stochDist + ' is currently not supported for SPSA')
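randomUtils.randPointsOnHypersphere is used here to draw a uniformly random unit direction for the simultaneous perturbation. A common way to implement such a draw is to normalize a standard Gaussian sample; a minimal sketch of that technique (an assumption, not necessarily how RAVEN implements it):

import numpy as np

def randPointOnHypersphere(dim, rng=None):
  # The direction of an isotropic Gaussian vector is uniform on the unit
  # hypersphere, so drawing a standard normal vector and normalizing it
  # yields a uniformly distributed unit direction.
  rng = rng if rng is not None else np.random.default_rng()
  v = rng.standard_normal(dim)
  return v / np.linalg.norm(v)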
Example #4
factors = mathUtils.normalizationFactors(fourList, mode='scale')
checkArray('0-1 scaling fourList: ', factors, (4,4))
factors = mathUtils.normalizationFactors(fourList, mode='none')
checkArray('No scaling fourList: ', factors, (0,1))

factors = mathUtils.normalizationFactors(sequentialList, mode='z')
checkArray('Z-score normalization sequentialList: ', factors, (2,1.41421356237))
factors = mathUtils.normalizationFactors(sequentialList, mode='scale')
checkArray('0-1 scaling sequentialList: ', factors, (0,4))
factors = mathUtils.normalizationFactors(sequentialList, mode='none')
checkArray('No scaling sequentialList: ', factors,(0,1))
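These expected factors follow directly from the definitions, assuming sequentialList is [0, 1, 2, 3, 4] (the only definition consistent with all three checks): the mean is 2, the population standard deviation is sqrt(2) ~ 1.41421356237, the minimum is 0, and the range is 4. A quick verification:

import numpy as np

seq = np.array([0, 1, 2, 3, 4])   # assumed definition of sequentialList
print(seq.mean(), seq.std())      # 2.0 1.4142135623730951 -> the 'z' factors
print(seq.min(), np.ptp(seq))     # 0 4 -> the 'scale' (offset, range) factors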

#check hyperrectangle diagonal in several dimensions
## 2d
sideLengths = [3,4]
checkAnswer('2D hyperdiagonal',mathUtils.hyperdiagonal(sideLengths),5)
## 3d
sideLengths.append(12)
checkAnswer('3D hyperdiagonal',mathUtils.hyperdiagonal(sideLengths),13)
## 4d
sideLengths.append(84)
checkAnswer('4D hyperdiagonal',mathUtils.hyperdiagonal(sideLengths),85)
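These expected answers chain Pythagorean triples, so each can be verified by hand: sqrt(3^2 + 4^2) = 5, sqrt(3^2 + 4^2 + 12^2) = sqrt(5^2 + 12^2) = 13, and sqrt(5^2 + 12^2 + 84^2) = sqrt(13^2 + 84^2) = 85.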


print(results)

sys.exit(results["fail"])
"""
  <TestInfo>
    <name>framework.mathUtils</name>
    <author>talbpaul</author>