Example #1
File: Optimizer.py  Project: mattdon/raven
 def getLossFunctionGivenId(self, evaluationID):
   """
     Method to get the Loss Function value given an evaluation ID
     @ In, evaluationID, string, the evaluation identifier (prefix)
     @ Out, functionValue, float, the loss function value
   """
   objective  = self.mdlEvalHist.getParametersValues('outputs', nodeId = 'RecontructEnding')[self.objVar]
   prefix = self.mdlEvalHist.getMetadata('prefix',nodeId='RecontructEnding')
   if len(prefix) > 0 and utils.returnIdSeparator() in prefix[0]:
     # ensemble model id modification
     # FIXME: Need to find a better way to handle this case
     prefix = [key.split(utils.returnIdSeparator())[-1] for key in prefix]
   search = dict(zip(prefix, objective))
   functionValue = search.get(evaluationID,None)
   return functionValue
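The method pairs evaluation prefixes with objective values and, for ensemble-model runs, strips everything before the id separator from each prefix before the lookup. Below is a minimal, self-contained sketch of that lookup logic using plain lists instead of RAVEN's mdlEvalHist data object; the "|" separator is only a stand-in for whatever utils.returnIdSeparator() returns.

# Minimal sketch of the prefix -> loss-function lookup above, using plain lists
# instead of RAVEN's mdlEvalHist data object. The separator "|" is a stand-in
# for utils.returnIdSeparator().
def loss_for_id(evaluation_id, prefixes, objectives, separator="|"):
  # ensemble-model prefixes look like "subModelName|<id>"; keep only the id part
  if prefixes and separator in prefixes[0]:
    prefixes = [p.split(separator)[-1] for p in prefixes]
  return dict(zip(prefixes, objectives)).get(evaluation_id, None)

print(loss_for_id("3", ["model|1", "model|2", "model|3"], [0.5, 0.2, 0.1]))  # 0.1
print(loss_for_id("7", ["1", "2", "3"], [0.5, 0.2, 0.1]))                    # None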
Example #2
 def createNewInput(self,myInput,samplerType,**kwargs):
   """
     This function will return a new input to be submitted to the model, it is called by the sampler.
     @ In, myInput, list, the inputs (list) to start from to generate the new one
     @ In, samplerType, string, is the type of sampler that is calling to generate a new input
      @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler,
           a mandatory key is 'SampledVars', which contains a dictionary {'variable name': value}
     @ Out, newInputs, dict, dict that returns the new inputs for each sub-model
   """
   self.raiseADebug("Create New Input")
   useROM = kwargs['useROM']
   if useROM:
     identifier = kwargs['prefix']
     newKwargs = {'prefix':identifier, 'useROM':useROM}
     for romName in self.romsDictionary.keys():
       newKwargs[romName] = self.__selectInputSubset(romName, kwargs)
       newKwargs[romName]['prefix'] = romName+utils.returnIdSeparator()+identifier
       newKwargs[romName]['uniqueHandler'] = self.name+identifier
   else:
     newKwargs = copy.deepcopy(kwargs)
   if self.modelInstance.type == 'Code':
     codeInput = []
     romInput = []
     for elem in myInput:
       if isinstance(elem, Files.File):
         codeInput.append(elem)
       elif elem.type in ['PointSet', 'HistorySet']:
         romInput.append(elem)
       else:
         self.raiseAnError(IOError, "The type of input ", elem.name, " can not be accepted!")
     if useROM:
       return (romInput, samplerType, newKwargs)
     else:
       return (codeInput, samplerType, newKwargs)
   return (myInput, samplerType, newKwargs)
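When the wrapped model is a Code, the inputs are partitioned by type: Files go to the code, PointSet/HistorySet data objects go to the ROMs. The sketch below isolates that partition; FileInput and DataObject are hypothetical stand-ins for Files.File and for RAVEN data objects, not the real classes.

# Sketch of the input partitioning used above when modelInstance.type == 'Code'.
# FileInput and DataObject are hypothetical stand-ins for Files.File and for
# RAVEN data objects carrying a .type of 'PointSet' or 'HistorySet'.
class FileInput:
  def __init__(self, name): self.name, self.type = name, 'File'
class DataObject:
  def __init__(self, name, kind): self.name, self.type = name, kind

def partition_inputs(my_input):
  code_input, rom_input = [], []
  for elem in my_input:
    if isinstance(elem, FileInput):
      code_input.append(elem)
    elif elem.type in ('PointSet', 'HistorySet'):
      rom_input.append(elem)
    else:
      raise IOError("The type of input %s can not be accepted!" % elem.name)
  return code_input, rom_input

code, rom = partition_inputs([FileInput('deck.i'), DataObject('hist', 'HistorySet')])
print([e.name for e in code], [e.name for e in rom])  # ['deck.i'] ['hist']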
Example #3
    def createNewInput(self, myInput, samplerType, **kwargs):
        """
          This function will return a new input to be submitted to the model, it is called by the sampler.
          @ In, myInput, list, the inputs (list) to start from to generate the new one
          @ In, samplerType, string, is the type of sampler or optimizer that is calling to generate a new input
          @ In, **kwargs, dict, is a dictionary that contains the information coming from the sampler,
               a mandatory key is 'SampledVars', which contains a dictionary {'variable name': value}
          @ Out, newInputs, dict, dict that returns the new inputs for each sub-model
        """
        # check if all the inputs of the submodule are covered by the sampled vars and Outputs of the other sub-models
        if self.needToCheckInputs:
            allCoveredVariables = list(set(itertools.chain(self.allOutputs, kwargs['SampledVars'].keys())))

        identifier = kwargs['prefix']
        # global prefix
        newKwargs = {'prefix': identifier}

        newInputs = {}

        ## First check the inputs if they need to be checked
        if self.needToCheckInputs:
            for modelIn, specs in self.modelsDictionary.items():
                for inp in specs['Input']:
                    if inp not in allCoveredVariables:
                        self.raiseAnError(
                            RuntimeError,
                            "for sub-model " + modelIn + " the input " + inp +
                            " has not been found among other models' outputs and sampled variables!"
                        )

        ## Now prepare the new inputs for each model
        for modelIn, specs in self.modelsDictionary.items():
            newKwargs[modelIn] = self.__selectInputSubset(modelIn, kwargs)

            # if specs['Instance'].type != 'Code':
            #   inputDict = [self._inputToInternal(self.modelsDictionary[modelIn]['InputObject'][0],newKwargs['SampledVars'].keys())]
            # else:
            #   inputDict = self.modelsDictionary[modelIn]['InputObject']

            # local prefix
            newKwargs[modelIn]['prefix'] = modelIn + utils.returnIdSeparator() + identifier
            newInputs[modelIn] = self.modelsDictionary[modelIn]['InputObject']

            # if specs['Instance'].type == 'Code':
            #   newInputs[modelIn][1]['originalInput'] = inputDict

        self.needToCheckInputs = False
        return (newInputs, samplerType, newKwargs)
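The coverage check only verifies that every declared input of every sub-model appears either among the sampled variables or among some other sub-model's outputs. Below is a small sketch of just that set logic; models_inputs, all_outputs and sampled_vars are hypothetical stand-ins for the corresponding entries of modelsDictionary, allOutputs and kwargs['SampledVars'].

import itertools

# Sketch of the input-coverage check above, with plain dictionaries in place of
# self.modelsDictionary, self.allOutputs and kwargs['SampledVars'].
def check_coverage(models_inputs, all_outputs, sampled_vars):
  covered = set(itertools.chain(all_outputs, sampled_vars))
  for model, inputs in models_inputs.items():
    for inp in inputs:
      if inp not in covered:
        raise RuntimeError("for sub-model %s the input %s has not been found "
                           "among other models' outputs and sampled variables!" % (model, inp))

check_coverage({'thermal': ['power'], 'econ': ['temperature']},
               all_outputs=['temperature'], sampled_vars={'power': 1.0})  # passes silently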
Example #4
    def _externalRun(self, inRun, jobHandler):
        """
          Method that performs the actual run of the logical model (separated from run method for parallelization purposes)
          @ In, inRun, tuple, tuple of Inputs (inRun[0] actual input, inRun[1] type of sampler,
            inRun[2] dictionary that contains information coming from sampler)
          @ In, jobHandler, instance, instance of jobHandler
          @ Out, exportDict, dict, dict of results from this hybrid model
        """
        self.raiseADebug("{}: External Run".format(self.name))
        originalInput = inRun[0]
        samplerType = inRun[1]
        inputKwargs = inRun[2]
        identifier = inputKwargs.pop('prefix')
        # TODO: execute control function, move this to createNewInput
        modelToRun = inputKwargs.pop('modelToRun')
        inputKwargs['prefix'] = modelToRun + utils.returnIdSeparator() + identifier
        inputKwargs['uniqueHandler'] = self.name + identifier

        moveOn = False
        while not moveOn:
            if jobHandler.availability() > 0:
                self.modelInstances[modelToRun].submit(originalInput,
                                                       samplerType, jobHandler,
                                                       **inputKwargs)
                self.raiseADebug("Job submitted for model", modelToRun,
                                 "with identifier", identifier)
                moveOn = True
            else:
                time.sleep(self.sleepTime)
        while not jobHandler.isThisJobFinished(inputKwargs['prefix']):
            time.sleep(self.sleepTime)
        self.raiseADebug("Job finished", modelToRun, "with identifier",
                         identifier)
        finishedRun = jobHandler.getFinished(
            jobIdentifier=inputKwargs['prefix'],
            uniqueHandler=inputKwargs['uniqueHandler'])
        evaluation = finishedRun[0].getEvaluation()
        if isinstance(evaluation, rerror):
            self.raiseAnError(RuntimeError, "The model", modelToRun,
                              "identified by", finishedRun[0].identifier,
                              "failed!")
        # collect output in temporary data object
        exportDict = evaluation
        self.raiseADebug("{}: Create exportDict".format(self.name))
        return exportDict
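The run follows a simple submit-and-poll pattern: wait for a free slot, submit, then sleep until the job identifier is reported finished. The sketch below captures that pattern generically; DummyHandler is a hypothetical stand-in exposing the same calls the snippet relies on, not a real RAVEN jobHandler.

import time

# Generic sketch of the submit-and-poll pattern used in _externalRun.
class DummyHandler:
  def __init__(self): self.finished = set()
  def availability(self): return 1
  def submit(self, job_id): self.finished.add(job_id)       # pretend the job completes at once
  def isThisJobFinished(self, job_id): return job_id in self.finished

def run_and_wait(handler, job_id, sleep_time=1.e-3):
  while handler.availability() <= 0:                        # wait for a free slot
    time.sleep(sleep_time)
  handler.submit(job_id)
  while not handler.isThisJobFinished(job_id):              # poll until done
    time.sleep(sleep_time)

run_and_wait(DummyHandler(), "model|42")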
Example #5
  def _checkModelFinish(self, traj, updateKey, evalID):
    """
      Determines if the Model has finished running an input and returned the output
      @ In, traj, int, traj on which the input is being checked
      @ In, updateKey, int, the id of variable update on which the input is being checked
      @ In, evalID, int or string, indicating the id of the perturbation (int) or that it is a variable update (string 'v')
      @ Out, _checkModelFinish, tuple(bool, int), tuple indicating whether the Model has finished the evaluation
            over the input identified by traj+updateKey+evalID, and the index of the location of that input in the dataobject
    """
    if self.mdlEvalHist.isItEmpty():    return False

    prefix = self.mdlEvalHist.getMetadata('prefix')
    for index, pr in enumerate(prefix):
      pr = pr.split(utils.returnIdSeparator())[-1].split('_')
      # use 'prefix' to locate the input sent out. The format is: trajID + iterID + (v for variable update; otherwise id for gradient evaluation) + global ID
      if pr[0] == str(traj) and pr[1] == str(updateKey) and pr[2] == str(evalID):
        return (True, index)
    return (False, -1)
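The job prefix encodes trajectory, update key and evaluation id separated by '_' after the last ensemble-id separator. A tiny sketch of the same parsing, with "|" as a hypothetical stand-in for utils.returnIdSeparator():

# Sketch of the prefix matching used in _checkModelFinish. The trailing part of
# each prefix is "traj_updateKey_evalID".
def find_evaluation(prefixes, traj, update_key, eval_id, separator="|"):
  for index, pr in enumerate(prefixes):
    parts = pr.split(separator)[-1].split('_')
    if parts[0] == str(traj) and parts[1] == str(update_key) and parts[2] == str(eval_id):
      return (True, index)
  return (False, -1)

print(find_evaluation(["ens|0_3_v", "ens|1_3_2"], traj=1, update_key=3, eval_id=2))  # (True, 1)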
Example #6
    def _externalRun(self, inRun, jobHandler):
        """
          Method that performs the actual run of the ensemble model (separated from run method for parallelization purposes)
          @ In, inRun, tuple, tuple of Inputs, e.g. inRun[0]: actual dictionary of input, inRun[1]: string,
            the type of Sampler or Optimizer, inRun[2], dict, contains the information from the Sampler
          @ In, jobHandler, object, instance of jobHandler
          @ Out, returnEvaluation, tuple, the results of the ensemble model:
                                   - returnEvaluation[0] dict of results from each sub-model,
                                   - returnEvaluation[1] the dataObjects where the projection of each model is stored
                                   - returnEvaluation[2] dict used to store the optional outputs
        """
        originalInput = inRun[0]
        samplerType = inRun[1]
        inputKwargs = inRun[2]
        identifier = inputKwargs.pop('prefix')
        tempOutputs = {}
        inRunTargetEvaluations = {}

        for modelIn in self.orderList:
            # reset the DataObject for the projection
            self.localTargetEvaluations[modelIn].reset()
            inRunTargetEvaluations[modelIn] = copy.copy(
                self.localTargetEvaluations[modelIn])
        residueContainer = dict.fromkeys(self.modelsDictionary.keys())
        gotOutputs = [{}] * len(self.orderList)
        typeOutputs = [''] * len(self.orderList)

        # if nonlinear system, initialize residue container
        if self.activatePicard:
            for modelIn in self.orderList:
                residueContainer[modelIn] = {'residue': {}, 'iterValues': [{}] * 2}
                for out in self.modelsDictionary[modelIn]['Output']:
                    residueContainer[modelIn]['residue'][out] = np.zeros(1)
                    residueContainer[modelIn]['iterValues'][0][out] = np.zeros(1)
                    residueContainer[modelIn]['iterValues'][1][out] = np.zeros(1)

        maxIterations = self.maxIterations if self.activatePicard else 1
        iterationCount = 0
        while iterationCount < maxIterations:
            returnDict = {}
            iterationCount += 1

            if self.activatePicard:
                self.raiseAMessage("Picard's Iteration " + str(iterationCount))

            for modelCnt, modelIn in enumerate(self.orderList):
                # clear the model's Target Evaluation data object
                # in case there are metadataToTransfer, let's collect them from the source
                metadataToTransfer = None
                if self.modelsDictionary[modelIn]['metadataToTransfer']:
                    metadataToTransfer = {}
                for metadataToGet, source, alias in self.modelsDictionary[modelIn]['metadataToTransfer']:
                    if metadataToGet in returnDict[source]['general_metadata']:
                        metadataToTransfer[metadataToGet if alias is None else alias] = \
                            returnDict[source]['general_metadata'][metadataToGet]
                    elif metadataToGet in returnDict[source]['response']:
                        metadataToTransfer[metadataToGet if alias is None else alias] = \
                            returnDict[source]['response'][metadataToGet]
                    else:
                        self.raiseAnError(RuntimeError, 'metadata "' + metadataToGet +
                                          '" is not present among the ones available in source "' + source + '"!')
                # get dependent outputs
                dependentOutput = self.__retrieveDependentOutput(
                    modelIn, gotOutputs, typeOutputs)
                # if nonlinear system, check for initial conditions
                if iterationCount == 1 and self.activatePicard:
                    sampledVars = inputKwargs[modelIn]['SampledVars'].keys()
                    conditionsToCheck = set(self.modelsDictionary[modelIn]['Input']) - set(itertools.chain(dependentOutput.keys(), sampledVars))
                    for initialConditionToSet in conditionsToCheck:
                        if initialConditionToSet in self.initialConditions.keys():
                            dependentOutput[initialConditionToSet] = self.initialConditions[initialConditionToSet]
                        else:
                            self.raiseAnError(IOError, "No initial conditions provided for variable " + initialConditionToSet)
                # set new identifiers
                inputKwargs[modelIn]['prefix'] = modelIn + utils.returnIdSeparator() + identifier
                inputKwargs[modelIn]['uniqueHandler'] = self.name + identifier
                if metadataToTransfer is not None:
                    inputKwargs[modelIn]['metadataToTransfer'] = metadataToTransfer

                for key, value in dependentOutput.items():
                    inputKwargs[modelIn]["SampledVars"][key] = dependentOutput[
                        key]
                    ## FIXME it is a mistake (Andrea). The SampledVarsPb for this variable should be transferred from outside
                    ## Who has this information? -- DPM 4/11/17
                    inputKwargs[modelIn]["SampledVarsPb"][key] = 1.0
                self._replaceVariablesNamesWithAliasSystem(
                    inputKwargs[modelIn]["SampledVars"], 'input', False)
                self._replaceVariablesNamesWithAliasSystem(
                    inputKwargs[modelIn]["SampledVarsPb"], 'input', False)

                nextModel = False
                while not nextModel:
                    moveOn = False
                    while not moveOn:
                        if jobHandler.availability() > 0:
                            # run the model
                            #if modelIn not in modelsOnHold:
                            self.raiseADebug('Submitting model', modelIn)
                            self.modelsDictionary[modelIn]['Instance'].submit(
                                originalInput[modelIn], samplerType,
                                jobHandler, **inputKwargs[modelIn])
                            # wait until the model finishes, in order to get ready to run the subsequent one
                            while not jobHandler.isThisJobFinished(
                                    modelIn + utils.returnIdSeparator() +
                                    identifier):
                                time.sleep(1.e-3)
                            nextModel = moveOn = True
                        else:
                            time.sleep(1.e-3)
                    # store the results in the working dictionaries
                    returnDict[modelIn] = {}
                    #if modelIn not in modelsOnHold:
                    # get job that just finished to gather the results
                    finishedRun = jobHandler.getFinished(
                        jobIdentifier=modelIn + utils.returnIdSeparator() +
                        identifier,
                        uniqueHandler=self.name + identifier)
                    evaluation = finishedRun[0].getEvaluation()
                    if isinstance(evaluation, Runners.Error):
                        # the model failed
                        for modelToRemove in self.orderList:
                            if modelToRemove != modelIn:
                                jobHandler.getFinished(
                                    jobIdentifier=modelToRemove +
                                    utils.returnIdSeparator() + identifier,
                                    uniqueHandler=self.name + identifier)
                        self.raiseAnError(
                            RuntimeError,
                            "The Model  " + modelIn + " identified by " +
                            finishedRun[0].identifier + " failed!")
                    # store the output dictionary
                    tempOutputs[modelIn] = copy.deepcopy(evaluation)
                    # collect the target evaluation
                    #if modelIn not in modelsOnHold:
                    self.modelsDictionary[modelIn]['Instance'].collectOutput(
                        finishedRun[0], inRunTargetEvaluations[modelIn])
                    ## FIXME: The call asDataset() is unuseful here. It must be done because otherwise the realization(...) method from collector
                    ## does not return the indexes values (TO FIX)
                    inRunTargetEvaluations[modelIn].asDataset()
                    # get realization
                    dataSet = inRunTargetEvaluations[modelIn].realization(
                        index=iterationCount - 1, unpackXArray=True)
                    ##FIXME: the following dict construction is a temporary solution since the realization method returns scalars if we have a PointSet
                    dataSet = {
                        key: np.atleast_1d(dataSet[key])
                        for key in dataSet
                    }
                    responseSpace = dataSet
                    typeOutputs[modelCnt] = inRunTargetEvaluations[
                        modelIn].type
                    gotOutputs[modelCnt] = {
                        key: dataSet[key]
                        for key in
                        inRunTargetEvaluations[modelIn].getVars("output") +
                        inRunTargetEvaluations[modelIn].getVars("indexes")
                    }

                    #store the results in return dictionary
                    # store the metadata
                    returnDict[modelIn]['response'] = evaluation
                    # overwrite with target evaluation filtering
                    returnDict[modelIn]['response'].update(responseSpace)
                    returnDict[modelIn]['prefix'] = np.atleast_1d(identifier)
                    returnDict[modelIn][
                        'general_metadata'] = inRunTargetEvaluations[
                            modelIn].getMeta(general=True)
                    # if nonlinear system, compute the residue
                    if self.activatePicard:
                        residueContainer[modelIn]['iterValues'][1] = copy.copy(residueContainer[modelIn]['iterValues'][0])
                        for out in inRunTargetEvaluations[modelIn].getVars("output"):
                            residueContainer[modelIn]['iterValues'][0][out] = copy.copy(gotOutputs[modelCnt][out])
                            if iterationCount == 1:
                                residueContainer[modelIn]['iterValues'][1][out] = np.zeros(len(residueContainer[modelIn]['iterValues'][0][out]))
                        for out in gotOutputs[modelCnt].keys():
                            residueContainer[modelIn]['residue'][out] = abs(np.asarray(residueContainer[modelIn]['iterValues'][0][out]) -
                                                                            np.asarray(residueContainer[modelIn]['iterValues'][1][out]))
                        residueContainer[modelIn]['Norm'] = np.linalg.norm(np.asarray(list(residueContainer[modelIn]['iterValues'][1].values())) -
                                                                           np.asarray(list(residueContainer[modelIn]['iterValues'][0].values())))

            # if nonlinear system, check the total residue and convergence
            if self.activatePicard:
                iterZero = []
                iterOne = []
                for modelIn in self.orderList:
                    iterZero += residueContainer[modelIn]['iterValues'][
                        0].values()
                    iterOne += residueContainer[modelIn]['iterValues'][
                        1].values()
                residueContainer['TotalResidue'] = np.linalg.norm(
                    np.asarray(iterOne) - np.asarray(iterZero))
                self.raiseAMessage("Picard's Iteration Norm: " +
                                   str(residueContainer['TotalResidue']))
                residualPass = residueContainer['TotalResidue'] <= self.convergenceTol
                # sometimes there can be multiple residual values
                if hasattr(residualPass, '__len__'):
                    residualPass = all(residualPass)
                if residualPass:
                    self.raiseAMessage("Picard's Iteration converged. Norm: " +
                                       str(residueContainer['TotalResidue']))
                    break
        returnEvaluation = returnDict, inRunTargetEvaluations, tempOutputs
        return returnEvaluation
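The Picard loop declares convergence when the norm of the difference between two successive iterations' outputs drops below convergenceTol. Below is a numpy-only sketch of that residue computation on hypothetical per-variable output dictionaries (old/new play the roles of iterValues[1]/iterValues[0]).

import numpy as np

# Sketch of the Picard residue check above: compare the outputs of two
# successive iterations and test the norm of their difference against a tolerance.
def picard_converged(old, new, tol):
  residue = {k: np.abs(np.asarray(new[k]) - np.asarray(old[k])) for k in new}
  norm = np.linalg.norm(np.asarray(list(new.values())) - np.asarray(list(old.values())))
  return norm <= tol, norm, residue

old = {'T': np.array([500.0]), 'p': np.array([1.0e5])}
new = {'T': np.array([500.2]), 'p': np.array([1.0e5])}
print(picard_converged(old, new, tol=1.0))  # (True, 0.2, {'T': array([0.2]), 'p': array([0.])})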
Example #7
 def _externalRun(self,inRun, jobHandler):
   """
      Method that performs the actual run of the ensemble model (separated from run method for parallelization purposes)
     @ In, inRun, tuple, tuple of Inputs (inRun[0] actual input, inRun[1] type of sampler,
       inRun[2] dictionary that contains information coming from sampler)
     @ In, jobHandler, instance, instance of jobHandler
     @ Out, exportDict, dict, dict of results from this hybrid model
   """
   self.raiseADebug("External Run")
   originalInput = inRun[0]
   samplerType = inRun[1]
   inputKwargs = inRun[2]
   identifier = inputKwargs.pop('prefix')
   useROM = inputKwargs.pop('useROM')
   uniqueHandler = self.name + identifier
   if useROM:
     # run roms
     exportDict = {}
     self.raiseADebug("Switch to ROMs")
     # submit all the roms
     for romName, romInfo in self.romsDictionary.items():
       inputKwargs[romName]['prefix'] = romName+utils.returnIdSeparator()+identifier
       nextRom = False
       while not nextRom:
         if jobHandler.availability() > 0:
           romInfo['Instance'].submit(originalInput, samplerType, jobHandler, **inputKwargs[romName])
           self.raiseADebug("Job ", romName, " with identifier ", identifier, " is submitted")
           nextRom = True
         else:
           time.sleep(self.sleepTime)
     # collect the outputs from the runs of ROMs
     while True:
       finishedJobs = jobHandler.getFinished(uniqueHandler=uniqueHandler)
       for finishedRun in finishedJobs:
         self.raiseADebug("collect job with identifier ", identifier)
         evaluation = finishedRun.getEvaluation()
         if isinstance(evaluation, Runners.Error):
           self.raiseAnError(RuntimeError, "The job identified by "+finishedRun.identifier+" failed!")
         # collect output in temporary data object
         tempExportDict = evaluation
         exportDict = self.__mergeDict(exportDict, tempExportDict)
       if jobHandler.areTheseJobsFinished(uniqueHandler=uniqueHandler):
         self.raiseADebug("Jobs with uniqueHandler ", uniqueHandler, "are collected!")
         break
       time.sleep(self.sleepTime)
     exportDict['prefix'] = identifier
   else:
     # run model
     inputKwargs['prefix'] = self.modelInstance.name+utils.returnIdSeparator()+identifier
     inputKwargs['uniqueHandler'] = self.name + identifier
     moveOn = False
     while not moveOn:
       if jobHandler.availability() > 0:
         self.modelInstance.submit(originalInput, samplerType, jobHandler, **inputKwargs)
         self.raiseADebug("Job submitted for model ", self.modelInstance.name, " with identifier ", identifier)
         moveOn = True
       else:
         time.sleep(self.sleepTime)
     while not jobHandler.isThisJobFinished(self.modelInstance.name+utils.returnIdSeparator()+identifier):
       time.sleep(self.sleepTime)
     self.raiseADebug("Job finished ", self.modelInstance.name, " with identifier ", identifier)
     finishedRun = jobHandler.getFinished(jobIdentifier = inputKwargs['prefix'], uniqueHandler = uniqueHandler)
     evaluation = finishedRun[0].getEvaluation()
     if isinstance(evaluation, Runners.Error):
       self.raiseAnError(RuntimeError, "The model "+self.modelInstance.name+" identified by "+finishedRun[0].identifier+" failed!")
     # collect output in temporary data object
     exportDict = evaluation
     self.raiseADebug("Create exportDict")
   # used in the collectOutput
   exportDict['useROM'] = useROM
   return exportDict
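When ROMs are used, the outputs of all ROM jobs are merged into a single exportDict. The __mergeDict helper is not shown in this snippet; the sketch below assumes a simple key-wise union only to illustrate the collection loop, and the real RAVEN merge logic may differ.

# Hypothetical illustration of collecting several ROM evaluations into one
# export dictionary. merge_dict is a simple stand-in for the HybridModel's
# __mergeDict, which is not shown here.
def merge_dict(export_dict, new_dict):
  merged = dict(export_dict)
  merged.update(new_dict)
  return merged

export_dict = {}
for rom_result in ({'rom1_out': 1.2}, {'rom2_out': 3.4}):
  export_dict = merge_dict(export_dict, rom_result)
export_dict['prefix'] = '42'
print(export_dict)  # {'rom1_out': 1.2, 'rom2_out': 3.4, 'prefix': '42'}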
Example #8
  def __advanceModel(self, identifier, modelToExecute, origInputList, inputKwargs, inRunTargetEvaluations, samplerType, iterationCount, jobHandler = None):
    """
      This method is aimed at advancing the execution of a sub-model and collecting the data using
      the realization
      @ In, identifier, str, current job identifier
      @ In, modelToExecute, super(Model), Model instance that needs to be advanced
      @ In, origInputList, list, list of model input
      @ In, inputKwargs, dict, dictionary of kwargs for this model
      @ In, inRunTargetEvaluations, DataObject, target evaluation for the model to advance
      @ In, samplerType, str, sampler Type
      @ In, iterationCount, int, iteration counter (1 if not picard)
      @ In, jobHandler, jobHandler instance, optional, jobHandler instance (available only if parallelStrategy == 2)
      @ Out, returnDict, dict, dictionary containing the data extracted from the target evaluation
      @ Out, gotOutputs, dict, dictionary containing all the data coming out the model
      @ Out, evaluation, dict, the evaluation dictionary with the "unprojected" data
    """
    returnDict = {}

    self.raiseADebug('Submitting model',modelToExecute['Instance'].name)
    localIdentifier =  modelToExecute['Instance'].name+utils.returnIdSeparator()+identifier
    if self.parallelStrategy == 1:
      # we evaluate the model directly
      try:
        evaluation = modelToExecute['Instance'].evaluateSample.original_function(modelToExecute['Instance'], origInputList, samplerType, inputKwargs)
      except Exception as e:
        excType, excValue, excTrace = sys.exc_info()
        evaluation = None
    else:
      moveOn = False
      while not moveOn:
        # run the model
        inputKwargs.pop("jobHandler", None)
        modelToExecute['Instance'].submit(origInputList, samplerType, jobHandler, **inputKwargs)
        ## wait until the model finishes, in order to get ready to run the subsequent one
        while not jobHandler.isThisJobFinished(localIdentifier):
          time.sleep(1.e-3)
        moveOn = True
      # get job that just finished to gather the results
      finishedRun = jobHandler.getFinished(jobIdentifier = localIdentifier, uniqueHandler=self.name+identifier)
      evaluation = finishedRun[0].getEvaluation()
      if isinstance(evaluation, rerror):
        evaluation = None
        excType, excValue, excTrace = finishedRun[0].exceptionTrace
        e = rerror
        # the model failed
        for modelToRemove in list(set(self.orderList) - set([modelToExecute['Instance'].name])):
          jobHandler.getFinished(jobIdentifier = modelToRemove + utils.returnIdSeparator() + identifier, uniqueHandler = self.name + identifier)
      else:
        # collect the target evaluation
        modelToExecute['Instance'].collectOutput(finishedRun[0],inRunTargetEvaluations)

    if not evaluation:
      # the model failed
      import traceback
      msg = io.StringIO()
      traceback.print_exception(excType, excValue, excTrace, limit=10, file=msg)
      msg = msg.getvalue().replace('\n', '\n        ')
      self.raiseAnError(RuntimeError, f'The Model "{modelToExecute["Instance"].name}" id "{localIdentifier}" '+
                        f'failed! Trace:\n{"*"*72}\n{msg}\n{"*"*72}')
    else:
      if self.parallelStrategy == 1:
        inRunTargetEvaluations.addRealization(evaluation)
      else:
        modelToExecute['Instance'].collectOutput(finishedRun[0],inRunTargetEvaluations)

    ## FIXME: The call asDataset() is unuseful here. It must be done because otherwise the realization(...) method from collector
    ## does not return the indexes values (TO FIX)
    inRunTargetEvaluations.asDataset()
    # get realization
    dataSet = inRunTargetEvaluations.realization(index=iterationCount-1,unpackXArray=True)
    ##FIXME: the following dict construction is a temporary solution since the realization method returns scalars if we have a PointSet
    dataSet = {key:np.atleast_1d(dataSet[key]) for key in dataSet}
    responseSpace         = dataSet
    gotOutputs  = {key: dataSet[key] for key in inRunTargetEvaluations.getVars("output") + inRunTargetEvaluations.getVars("indexes")}
    if '_indexMap' in dataSet.keys():
      gotOutputs['_indexMap'] = dataSet['_indexMap']

    #store the results in return dictionary
    # store the metadata
    returnDict['response'        ] = copy.deepcopy(evaluation) #  this deepcopy must stay! alfoa
    # overwrite with target evaluation filtering
    returnDict['response'        ].update(responseSpace)
    returnDict['prefix'          ] = np.atleast_1d(identifier)
    returnDict['general_metadata'] = inRunTargetEvaluations.getMeta(general=True)

    return returnDict, gotOutputs, evaluation
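When the sub-model fails, the captured exception info is rendered into an indented string and attached to the error message. A standalone sketch of that traceback formatting with the standard library; failing_model is only a hypothetical stand-in for the sub-model call.

import io
import sys
import traceback

# Sketch of how __advanceModel turns a captured exception into an indented
# message block.
def failing_model():
  raise ValueError("bad input deck")

try:
  failing_model()
except Exception:
  excType, excValue, excTrace = sys.exc_info()

msg = io.StringIO()
traceback.print_exception(excType, excValue, excTrace, limit=10, file=msg)
msg = msg.getvalue().replace('\n', '\n        ')
print('The Model "demo" failed! Trace:\n' + "*" * 72 + '\n' + msg + '\n' + "*" * 72)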
Example #9
  def _externalRun(self,inRun, jobHandler = None):#, jobHandler):
    """
      Method that performs the actual run of the ensemble model (separated from run method for parallelization purposes)
      @ In, inRun, tuple, tuple of Inputs, e.g. inRun[0]: actual dictionary of input, inRun[1]: string,
        the type of Sampler or Optimizer, inRun[2], dict, contains the information from the Sampler
      @ In, jobHandler, object, optional, instance of jobHandler (available if parallelStrategy==2)
      @ Out, returnEvaluation, tuple, the results of the ensemble model:
                               - returnEvaluation[0] dict of results from each sub-model,
                               - returnEvaluation[1] the dataObjects where the projection of each model is stored
                               - returnEvaluation[2] dict used to store the optional outputs
    """
    originalInput = inRun[0]
    samplerType = inRun[1]
    inputKwargs = inRun[2]
    identifier = inputKwargs.pop('prefix')
    tempOutputs = {}
    inRunTargetEvaluations = {}

    for modelIn in self.orderList:
      # reset the DataObject for the projection
      self.localTargetEvaluations[modelIn].reset()
      # deepcopy assures distinct copies
      inRunTargetEvaluations[modelIn] = copy.deepcopy(self.localTargetEvaluations[modelIn])
    residueContainer = dict.fromkeys(self.modelsDictionary.keys())
    gotOutputs       = [{}]*len(self.orderList)
    typeOutputs      = ['']*len(self.orderList)

    # if nonlinear system, initialize residue container
    if self.activatePicard:
      for modelIn in self.orderList:
        residueContainer[modelIn] = {'residue':{},'iterValues':[{}]*2}
        for out in self.modelsDictionary[modelIn]['Output']:
          residueContainer[modelIn]['residue'][out] = np.zeros(1)
          residueContainer[modelIn]['iterValues'][0][out] = np.zeros(1)
          residueContainer[modelIn]['iterValues'][1][out] = np.zeros(1)

    maxIterations = self.maxIterations if self.activatePicard else 1
    iterationCount = 0
    while iterationCount < maxIterations:
      returnDict     = {}
      iterationCount += 1

      if self.activatePicard:
        self.raiseAMessage("Picard's Iteration "+ str(iterationCount))

      for modelCnt, modelIn in enumerate(self.orderList):
        # clear the model's Target Evaluation data object
        # in case there are metadataToTransfer, let's collect them from the source
        metadataToTransfer = None
        if self.modelsInputDictionary[modelIn]['metadataToTransfer']:
          metadataToTransfer = {}
        for metadataToGet, source, alias in self.modelsInputDictionary[modelIn]['metadataToTransfer']:
          if metadataToGet in returnDict[source]['general_metadata']:
            metaDataValue = returnDict[source]['general_metadata'][metadataToGet]
            metaDataValue = metaDataValue[0] if len(metaDataValue) == 1 else metaDataValue
            metadataToTransfer[metadataToGet if alias is None else alias] = metaDataValue
          elif metadataToGet in returnDict[source]['response']:
            metaDataValue = returnDict[source]['response'][metadataToGet]
            metaDataValue = metaDataValue[0] if len(metaDataValue) == 1 else metaDataValue
            metadataToTransfer[metadataToGet if alias is None else alias] = metaDataValue
          else:
            self.raiseAnError(RuntimeError,'metadata "'+metadataToGet+'" is not present among the ones available in source "'+source+'"!')
        # get dependent outputs
        dependentOutput = self.__retrieveDependentOutput(modelIn, gotOutputs, typeOutputs)
        # if nonlinear system, check for initial conditions
        if iterationCount == 1  and self.activatePicard:
          sampledVars = inputKwargs[modelIn]['SampledVars'].keys()
          conditionsToCheck = set(self.modelsDictionary[modelIn]['Input']) - set(itertools.chain(dependentOutput.keys(),sampledVars))
          for initialConditionToSet in conditionsToCheck:
            if initialConditionToSet in self.initialConditions.keys():
              dependentOutput[initialConditionToSet] = self.initialConditions[initialConditionToSet]
            else:
              self.raiseAnError(IOError,"No initial conditions provided for variable "+ initialConditionToSet)
        # set new identifiers
        inputKwargs[modelIn]['prefix']        = modelIn+utils.returnIdSeparator()+identifier
        inputKwargs[modelIn]['uniqueHandler'] = self.name+identifier
        if metadataToTransfer is not None:
          inputKwargs[modelIn]['metadataToTransfer'] = metadataToTransfer

        for key, value in dependentOutput.items():
          inputKwargs[modelIn]["SampledVars"  ][key] =  dependentOutput[key]
          ## FIXME it is a mistake (Andrea). The SampledVarsPb for this variable should be transferred from outside
          ## Who has this information? -- DPM 4/11/17
          inputKwargs[modelIn]["SampledVarsPb"][key] =  1.0
        self._replaceVariablesNamesWithAliasSystem(inputKwargs[modelIn]["SampledVars"  ],'input',False)
        self._replaceVariablesNamesWithAliasSystem(inputKwargs[modelIn]["SampledVarsPb"],'input',False)
        ## FIXME: this will come after we rework the "runInfo" collection in the code
        ## if run info is present, we need to pass it to kwargs
        ##if self.runInfoDict and 'Code' == self.modelsDictionary[modelIn]['Instance'].type:
        ##  inputKwargs[modelIn].update(self.runInfoDict)

        retDict, gotOuts, evaluation = self.__advanceModel(identifier, self.modelsDictionary[modelIn],
                                                        originalInput[modelIn], inputKwargs[modelIn],
                                                        inRunTargetEvaluations[modelIn], samplerType,
                                                        iterationCount, jobHandler)

        returnDict[modelIn] = retDict
        typeOutputs[modelCnt] = inRunTargetEvaluations[modelIn].type
        gotOutputs[modelCnt] =  gotOuts
        tempOutputs[modelIn] = evaluation

        # if nonlinear system, compute the residue
        ## it looks like this is handling _indexMap, but it's not clear since there's not a way to test it (yet).
        if self.activatePicard:
          residueContainer[modelIn]['iterValues'][1] = copy.copy(residueContainer[modelIn]['iterValues'][0])
          for out in  inRunTargetEvaluations[modelIn].getVars("output"):
            residueContainer[modelIn]['iterValues'][0][out] = copy.copy(gotOutputs[modelCnt][out])
            if iterationCount == 1:
              residueContainer[modelIn]['iterValues'][1][out] = np.zeros(len(residueContainer[modelIn]['iterValues'][0][out]))
          for out in gotOutputs[modelCnt].keys():
            residueContainer[modelIn]['residue'][out] = abs(np.asarray(residueContainer[modelIn]['iterValues'][0][out]) - np.asarray(residueContainer[modelIn]['iterValues'][1][out]))
          residueContainer[modelIn]['Norm'] =  np.linalg.norm(np.asarray(list(residueContainer[modelIn]['iterValues'][1].values()))-np.asarray(list(residueContainer[modelIn]['iterValues'][0].values())))

      # if nonlinear system, check the total residue and convergence
      if self.activatePicard:
        iterZero = []
        iterOne = []
        for modelIn in self.orderList:
          iterZero += residueContainer[modelIn]['iterValues'][0].values()
          iterOne  += residueContainer[modelIn]['iterValues'][1].values()
        residueContainer['TotalResidue'] = np.linalg.norm(np.asarray(iterOne)-np.asarray(iterZero))
        self.raiseAMessage("Picard's Iteration Norm: "+ str(residueContainer['TotalResidue']))
        residualPass = residueContainer['TotalResidue'] <= self.convergenceTol
        # sometimes there can be multiple residual values
        if hasattr(residualPass,'__len__'):
          residualPass = all(residualPass)
        if residualPass:
          self.raiseAMessage("Picard's Iteration converged. Norm: "+ str(residueContainer['TotalResidue']))
          break
    returnEvaluation = returnDict, inRunTargetEvaluations, tempOutputs
    return returnEvaluation
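The metadataToTransfer entries are (name, source model, alias) triples: the value is looked up first in the source's general metadata, then in its response, and stored under the alias when one is given. A small sketch of that resolution over a hypothetical returnDict-like dictionary (the triples and values below are illustrative only).

# Sketch of the metadataToTransfer resolution above. return_dict mimics the
# per-model returnDict structure ('general_metadata' and 'response' sections).
def resolve_metadata(transfer_list, return_dict):
  resolved = {}
  for name, source, alias in transfer_list:
    if name in return_dict[source]['general_metadata']:
      value = return_dict[source]['general_metadata'][name]
    elif name in return_dict[source]['response']:
      value = return_dict[source]['response'][name]
    else:
      raise RuntimeError('metadata "%s" is not present among the ones available in source "%s"!' % (name, source))
    resolved[name if alias is None else alias] = value
  return resolved

return_dict = {'thermal': {'general_metadata': {'solverIters': 12}, 'response': {'T': 512.0}}}
print(resolve_metadata([('T', 'thermal', 'coolantT'), ('solverIters', 'thermal', None)], return_dict))
# {'coolantT': 512.0, 'solverIters': 12}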