Example #1
    def _checkConvObjective(self, traj):
        """
          Checks the change in objective for convergence
          @ In, traj, int, trajectory identifier
          @ Out, converged, bool, convergence state
        """
        # need at least two accepted points before a change in objective can be measured
        if len(self._optPointHistory[traj]) < 2:
            return False
        o1, _ = self._optPointHistory[traj][-1]
        o2, _ = self._optPointHistory[traj][-2]
        delta = mathUtils.relativeDiff(o2[self._objectiveVar],
                                       o1[self._objectiveVar])
        converged = abs(delta) < self._convergenceCriteria['objective']
        self.raiseADebug(
            self.convFormat.format(name='objective',
                                   conv=str(converged),
                                   got=delta,
                                   req=self._convergenceCriteria['objective']))
        return converged
    def _updateConvergenceVector(self, traj, varsUpdate, currentLossVal):
        """
          Local method to update convergence vector.
          @ In, traj, int, identifier of the trajectory to update
          @ In, varsUpdate, int, current variables update iteration number
          @ In, currentLossVal, float, current loss function value
          @ Out, None
        """
        # first, check if we're at varsUpdate 0 (first entry); if so, we are at our first point
        if varsUpdate == 0:
            # we don't have enough points to decide to accept or reject the new point, so accept it as the initial point
            self.raiseADebug(
                'Accepting first point, since we have no rejection criteria.')
            self.status[traj]['reason'] = 'found new opt point'
            return

        #otherwise, we need to accept/reject point and check convergence
        currentInputDenorm = self.denormalizeData(
            self.optVarsHist[traj][self.counter['varsUpdate'][traj]])

        ## first, determine if we want to keep the new point
        # obtain the old loss value
        oldLossVal = self.counter['recentOptHist'][traj][0][self.objVar]
        # see if new point is better than old point
        newerIsBetter = self.checkIfBetter(currentLossVal, oldLossVal)
        # if this was a recommended preconditioning point, we should not be converged.
        pointFromRecommendation = (self.status[traj]['reason'] ==
                                   'received recommended point')
        # if improved, keep it and move forward; otherwise, reject it and recommend cutting step size
        if newerIsBetter:
            self.status[traj]['reason'] = 'found new opt point'
            self.raiseADebug(
                'Accepting potential opt point for improved loss value.  Diff: {}, New: {}, Old: {}'
                .format(abs(currentLossVal - oldLossVal), currentLossVal,
                        oldLossVal))
            #TODO REWORK this belongs in the base class optimizer; grad shouldn't know about multilevel!!
            #  -> this parameter is how multilevel knows that a successful perturbation of an outer loop has been performed
            #  maybe implement a "acceptPoint" method in base class?
            self.mlActiveSpaceSteps[traj] += 1
        else:
            self.status[traj]['reason'] = 'rejecting bad opt point'
            self.raiseADebug(
                'Rejecting potential opt point for worse loss value. old: "{}", new: "{}"'
                .format(oldLossVal, currentLossVal))
            # cut the next step size to hopefully stay in the valley instead of climb up the other side
            self.recommendToGain[traj] = 'cut'

        ## determine convergence
        if pointFromRecommendation:
            self.raiseAMessage(
                'Setting convergence for Trajectory "{}" to "False" because of preconditioning.'
                .format(traj))
            converged = False
        else:
            self.raiseAMessage(
                'Checking convergence for Trajectory "{}":'.format(traj))
            # tracks progress for grad norm, abs, rel tolerances
            self.convergenceProgress[traj] = {}
            converged = False  # updated for each individual criterion using "or" (pass one, pass all)
            #printing utility
            printString = '    {:<21}: {:<5}'
            printVals = printString + ' (check: {:>+9.2e} < {:>+9.2e}, diff: {:>9.2e})'

            def printProgress(name, boolCheck, test, gold):
                """
          Consolidates a commonly-used print statement to prevent errors and improve readability.
          @ In, name, str, printed name of convergence check
          @ In, boolCheck, bool, boolean convergence results for this check
          @ In, test, float, value of check at current opt point
          @ In, gold, float, convergence threshold value
          @ Out, None
        """
                self.raiseAMessage(
                    printVals.format(name, str(boolCheck), test, gold,
                                     abs(test - gold)))

            # "min step size" and "gradient norm" are both always valid checks, whether rejecting or accepting new point

            # min step size check
            try:
                lastStep = self.counter['lastStepSize'][traj]
                minStepSizeCheck = lastStep <= self.minStepSize
            except KeyError:
                #we reset the step size, so we don't have a value anymore
                lastStep = np.nan
                minStepSizeCheck = False
            printProgress('Min step size', minStepSizeCheck, lastStep,
                          self.minStepSize)
            converged = converged or minStepSizeCheck

            # gradient norm
            if len(self.counter['gradientHistory'][traj][0]) > 0:
                gradNorm = self.counter['gradNormHistory'][traj][0]
                self.convergenceProgress[traj]['grad'] = gradNorm
                gradientNormCheck = gradNorm <= self.gradientNormTolerance
            else:
                gradNorm = np.nan
                gradientNormCheck = False
            printProgress('Gradient magnitude', gradientNormCheck, gradNorm,
                          self.gradientNormTolerance)
            converged = converged or gradientNormCheck

            # if accepting new point, then "same coordinate" and "abs" and "rel" checks are also valid reasons to converge
            if newerIsBetter:
                #absolute tolerance
                absLossDiff = abs(
                    mathUtils.diffWithInfinites(currentLossVal, oldLossVal))
                self.convergenceProgress[traj]['abs'] = absLossDiff
                absTolCheck = absLossDiff <= self.absConvergenceTol
                printProgress('Absolute Loss Diff', absTolCheck, absLossDiff,
                              self.absConvergenceTol)
                converged = converged or absTolCheck

                #relative tolerance
                relLossDiff = mathUtils.relativeDiff(currentLossVal,
                                                     oldLossVal)
                self.convergenceProgress[traj]['rel'] = relLossDiff
                relTolCheck = relLossDiff <= self.relConvergenceTol
                printProgress('Relative Loss Diff', relTolCheck, relLossDiff,
                              self.relConvergenceTol)
                converged = converged or relTolCheck

                #same coordinate check
                sameCoordinateCheck = True
                for var, values in self.optVarsHist[traj][varsUpdate].items():
                    # don't check constants, of course they're the same
                    if var in self.constants:
                        continue
                    # differentiate vectors and scalars for checking
                    oldValue = self.counter['recentOptHist'][traj][0][var]
                    if hasattr(values, '__len__'):
                        if any(values != oldValue):
                            sameCoordinateCheck = False
                            break
                    else:
                        if values != oldValue:
                            sameCoordinateCheck = False
                            break
                self.raiseAMessage(
                    printString.format('Same coordinate check',
                                       str(sameCoordinateCheck)))
                converged = converged or sameCoordinateCheck

        if converged:
            # update number of successful convergences
            self.counter['persistence'][traj] += 1
            # check if we've met persistence requirement; if not, keep going
            if self.counter['persistence'][traj] >= self.convergencePersistence:
                self.raiseAMessage(
                    ' ... Trajectory "{}" converged {} times consecutively!'.
                    format(traj, self.counter['persistence'][traj]))
                self.convergeTraj[traj] = True
                self.removeConvergedTrajectory(traj)
            else:
                self.raiseAMessage(
                    ' ... converged Traj "{}" {} times, required persistence is {}.'
                    .format(traj, self.counter['persistence'][traj],
                            self.convergencePersistence))
        else:
            self.counter['persistence'][traj] = 0
            self.raiseAMessage(' ... continuing trajectory "{}".'.format(traj))
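
The two methods above combine several independent convergence criteria with a logical "or" and then require a trajectory to pass for several consecutive iterations (the persistence requirement) before it is declared converged. The standalone sketch below illustrates that pattern with hypothetical names (check_convergence, tolerances, persistence_required); it is an illustration of the idea only, not part of the RAVEN optimizer API.

def check_convergence(history, tolerances, state, persistence_required=2):
    """
    Minimal sketch: 'pass one, pass all' criteria plus a persistence counter.
    history: objective values, most recent last
    tolerances: dict with 'abs' and 'rel' thresholds
    state: mutable dict holding the consecutive-convergence counter
    """
    if len(history) < 2:
        # not enough points to measure a change yet
        return False
    new, old = history[-1], history[-2]
    absDiff = abs(new - old)
    relDiff = absDiff / max(abs(old), 1e-15)  # guard against dividing by zero
    # any single satisfied criterion marks this iteration as converged
    convergedNow = absDiff <= tolerances['abs'] or relDiff <= tolerances['rel']
    if convergedNow:
        state['persistence'] = state.get('persistence', 0) + 1
    else:
        state['persistence'] = 0  # reset on any non-converged iteration
    # only declare overall convergence after enough consecutive successes
    return state['persistence'] >= persistence_required

state = {}
values = [10.0, 4.0, 3.2, 3.1999, 3.19989]
for i in range(2, len(values) + 1):
    print(i, check_convergence(values[:i], {'abs': 1e-3, 'rel': 1e-4}, state))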
Example #3
    dist = mathUtils.distance(points, f)
    checkArray('distance %s' % str(f), dist, dists[i], 1e-5)

### check "numpyNearestMatch"
findIn = np.array([(1, 1, 1), (2, 2, 2), (3, 3, 3)])
find = [(0, 0, 0), (1, 2, 1), (1, 2, 2), (2, 2, 2), (10, 10, 10)]
idcs = [0, 0, 1, 1, 2]
correct = [(1, 1, 1), (1, 1, 1), (2, 2, 2), (2, 2, 2), (3, 3, 3)]
for i, f in enumerate(find):
    idx, ary = mathUtils.numpyNearestMatch(findIn, f)
    checkAnswer('numpyNearestMatch %s' % str(f), idx, idcs[i], 1e-5)
    checkArray('numpyNearestMatch %s' % str(f), ary, correct[i], 1e-5)
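
# The checks above expect numpyNearestMatch(findIn, f) to return both the index and
# the value of the row in findIn closest to f.  The sketch below reproduces that
# behavior (Euclidean distance, first index on ties); nearestMatchSketch is a
# hypothetical helper written for illustration, not RAVEN's implementation.
def nearestMatchSketch(findIn, target):
    """Return (index, row) of the row in findIn nearest to target."""
    findIn = np.asarray(findIn, dtype=float)
    target = np.asarray(target, dtype=float)
    distances = np.linalg.norm(findIn - target, axis=1)
    idx = int(np.argmin(distances))  # argmin picks the first index on ties
    return idx, findIn[idx]

idxSketch, arySketch = nearestMatchSketch(findIn, (1, 2, 2))
checkAnswer('nearestMatchSketch index', idxSketch, 1, 1e-5)
checkArray('nearestMatchSketch value', arySketch, (2, 2, 2), 1e-5)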

### check relative differences
# similar order magnitude
checkAnswer('relativeDiff O(1)', mathUtils.relativeDiff(1.234, 1.233),
            0.00081103000811)
# large order magnitude
checkAnswer('relativeDiff O(1e10)', mathUtils.relativeDiff(1.234e10, 1.233e10),
            0.00081103000811)
# small order magnitude
checkAnswer('relativeDiff O(1e-10)',
            mathUtils.relativeDiff(1.234e-10, 1.233e-10), 0.00081103000811)
# different magnitudes
checkAnswer('relativeDiff different magnitudes',
            mathUtils.relativeDiff(1.234e10, 1.233e-10),
            1.00081103000811e20,
            tol=1e6)
# measured is 0
checkAnswer('relativeDiff first is zero', mathUtils.relativeDiff(0, 1.234),
            1.0)
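
# The expected values above are consistent with a relative difference of the form
# |f1 - f2| / |f2|, falling back to an absolute difference when the reference value
# is zero.  relativeDiffSketch below is a hypothetical re-implementation for
# illustration only; mathUtils.relativeDiff also handles infinities and the
# both-zero case, which this sketch does not cover.
def relativeDiffSketch(f1, f2):
    """Relative difference of f1 with respect to f2 (sketch)."""
    scale = abs(f2)
    if scale == 0.0:
        scale = 1.0  # assumption: compare absolutely when the reference is zero
    return abs(f1 - f2) / scale

checkAnswer('relativeDiffSketch O(1)', relativeDiffSketch(1.234, 1.233),
            0.00081103000811)
checkAnswer('relativeDiffSketch first is zero', relativeDiffSketch(0, 1.234), 1.0)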