Example #1
0
    def runStatic(self, container, Inputs):
        """
          This method performs a static analysis of the graph model
          @ In, container, object, self-like object where all the variables can be stored
          @ In, Inputs, dict, dictionary of inputs from RAVEN
          @ Out, outcome, dict, dictionary containing the status of all output variables
        """
        # work on deep copies so the graph definition held by the container is never mutated
        varToNode = copy.deepcopy(container.mapping)
        graphNodes = copy.deepcopy(container.nodes)

        # drop every node whose associated RAVEN variable signals failure (value == 1.0),
        # together with any edge that points at it
        for varName, status in Inputs.items():
            if varName not in varToNode:
                continue
            failedNode = varToNode[varName]
            if failedNode in graphNodes and status[0] == 1.0:
                graphNodes.pop(failedNode, None)
                for edges in graphNodes.values():
                    if failedNode in edges:
                        edges.remove(failedNode)

        staticGraph = GS.graphObject(graphNodes)

        # an output variable is OK (0.) when some input node can still reach it,
        # FAILED (1.) otherwise
        outcome = {}
        for outNode in container.nodesOUT:
            allPaths = []
            for inNode in container.nodesIN:
                allPaths = allPaths + staticGraph.findAllPaths(inNode, outNode)
            varName = container.InvMapping[outNode]
            outcome[varName] = np.asarray(0.) if allPaths else np.asarray(1.)
        return outcome
Example #2
0
    def initialize(self, runInfo, inputs, initDict=None):
        """
          Method to initialize the EnsembleModel
          @ In, runInfo, dict, is the run info from the jobHandler
          @ In, inputs, list, is a list containing whatever is passed with an input role in the step
          @ In, initDict, dict, optional, dictionary of all objects available in the step that is using this model
          @ Out, None
        """
        # collect the names of all the outputs present in the Step, so each model's
        # optional Output entries can be validated against them below
        outputsNames = []
        if initDict is not None:
            outputsNames = [output.name for output in initDict['Output']]

        # here we check if all the inputs inputted in the Step containing the EnsembleModel are actually used
        checkDictInputsUsage = dict((inp, False) for inp in inputs)

        # collect the models
        self.allOutputs = set()
        for modelClass, modelType, modelName, modelInstance in self.assemblerDict[
                'Model']:
            self.modelsDictionary[modelName]['Instance'] = modelInstance
            inputInstancesForModel = []
            for inputName in self.modelsDictionary[modelName]['Input']:
                inputInstancesForModel.append(
                    self.retrieveObjectFromAssemblerDict('Input', inputName))
                # mark this Step input as consumed by at least one model
                checkDictInputsUsage[inputInstancesForModel[-1]] = True
            self.modelsDictionary[modelName][
                'InputObject'] = inputInstancesForModel

            # retrieve 'Output' objects, such as DataObjects, Databases to check if they are present in the Step
            if self.modelsDictionary[modelName]['Output'] is not None:
                outputNamesModel = []
                for output in self.modelsDictionary[modelName]['Output']:
                    outputObject = self.retrieveObjectFromAssemblerDict(
                        'Output', output, True)
                    if outputObject.name not in outputsNames:
                        self.raiseAnError(
                            IOError,
                            "The optional Output " + outputObject.name +
                            " listed for Model " + modelName +
                            " is not present among the Step outputs!!!")
                    outputNamesModel.append(outputObject.name)
                self.modelsDictionary[modelName][
                    'OutputObject'] = outputNamesModel
            else:
                self.modelsDictionary[modelName]['OutputObject'] = []

            # initialize model
            self.modelsDictionary[modelName]['Instance'].initialize(
                runInfo, inputInstancesForModel, initDict)
            # Generate a list of modules that needs to be imported for internal parallelization (parallel python)
            self.mods = self.mods + list(
                set(self.modelsDictionary[modelName]['Instance'].mods) -
                set(self.mods))
            # retrieve 'TargetEvaluation' DataObjects
            targetEvaluation = self.retrieveObjectFromAssemblerDict(
                'TargetEvaluation',
                self.modelsDictionary[modelName]['TargetEvaluation'], True)
            # assert acceptable TargetEvaluation types are used
            if targetEvaluation.type not in [
                    'PointSet', 'HistorySet', 'DataSet'
            ]:
                self.raiseAnError(
                    IOError,
                    "Only DataObjects are allowed as TargetEvaluation object. Got "
                    + str(targetEvaluation.type) + "!")
            # localTargetEvaluations are for passing data and then resetting, not keeping data between samples
            self.localTargetEvaluations[modelName] = copy.deepcopy(
                targetEvaluation)
            # get input variables
            inps = targetEvaluation.getVars('input')
            # get pivot parameters in input space if any and add it in the 'Input' list
            inDims = set([
                item for subList in targetEvaluation.getDimensions(
                    var="input").values() for item in subList
            ])
            # assemble the two lists
            self.modelsDictionary[modelName]['Input'] = inps + list(inDims -
                                                                    set(inps))
            # get output variables
            outs = targetEvaluation.getVars("output")
            # get pivot parameters in output space if any and add it in the 'Output' list
            outDims = set([
                item for subList in targetEvaluation.getDimensions(
                    var="output").values() for item in subList
            ])
            ## note, if a dimension is in both the input space AND output space, consider it an input
            outDims = outDims - inDims
            newOuts = outs + list(set(outDims) - set(outs))
            self.modelsDictionary[modelName]['Output'] = newOuts
            self.allOutputs = self.allOutputs.union(newOuts)
        # END loop to collect models
        self.allOutputs = list(self.allOutputs)

        # check if all the inputs passed in the step are linked with at least a model
        if not all(checkDictInputsUsage.values()):
            unusedFiles = ""
            for inFile, used in checkDictInputsUsage.items():
                if not used:
                    unusedFiles += " " + inFile.name
            self.raiseAnError(
                IOError,
                "The following inputs specified in the Step are not used in the EnsembleModel: "
                + unusedFiles)
        # construct chain connections
        modelsToOutputModels = dict.fromkeys(self.modelsDictionary.keys(),
                                             None)
        # find matching models: each model points to the models fed by its outputs
        for modelIn in self.modelsDictionary.keys():
            outputMatch = []
            for i in range(len(self.modelsDictionary[modelIn]['Output'])):
                match = self.__findMatchingModel(
                    'Input', self.modelsDictionary[modelIn]['Output'][i])
                outputMatch.extend(match if match is not None else [])
            outputMatch = list(set(outputMatch))
            modelsToOutputModels[modelIn] = outputMatch
        # construct the ensemble model directed graph
        self.ensembleModelGraph = graphStructure.graphObject(
            modelsToOutputModels)
        # make some checks
        # FIXME: the following check is too tight, even if the models are connected, the
        # code may still raise an error. I think in reality, we do not need to raise an error,
        # maybe a warning is enough. For example:
        #   a -> b -> c
        #        ^
        #        |
        #   e -> d -> f
        if not self.ensembleModelGraph.isConnectedNet():
            isolatedModels = self.ensembleModelGraph.findIsolatedVertices()
            self.raiseAnError(
                IOError,
                "Some models are not connected. Possible candidates are: " +
                ' '.join(isolatedModels))
        # get all paths
        allPath = self.ensembleModelGraph.findAllUniquePaths(
            self.initialStartModels)
        ###################################################
        # to be removed once executionList can be handled #
        self.orderList = self.ensembleModelGraph.createSingleListOfVertices(
            allPath)
        self.raiseAMessage("Model Execution list: " +
                           ' -> '.join(self.orderList))
        ###################################################
        ###########################################################################################
        # To be uncommented when the execution list can be handled                                #
        # if len(allPath) > 1: self.executionList = self.__getExecutionList(self.orderList,allPath) #
        # else               : self.executionList = allPath[-1]                                     #
        ###########################################################################################
        # check if Picard needs to be activated (a loop in the graph means a non-linear system)
        self.activatePicard = self.ensembleModelGraph.isALoop()
        if self.activatePicard:
            self.raiseAMessage(
                "EnsembleModel connections determined a non-linear system. Picard's iterations activated!"
            )
            if len(self.initialStartModels) == 0:
                self.raiseAnError(
                    IOError,
                    "The 'initialStartModels' xml node is missing, this is required since the Picard's iteration is activated!"
                )
            if len(self.initialConditions.keys()) == 0:
                self.raiseAnError(
                    IOError,
                    "Picard's iterations mode activated but no initial conditions provided!"
                )
        else:
            if len(self.initialStartModels) != 0:
                self.raiseAnError(
                    IOError,
                    "The 'initialStartModels' xml node is not needed for non-Picard calculations, since the running sequence can be automatically determined by the code! Please delete this node to avoid a mistake."
                )
            self.raiseAMessage(
                "EnsembleModel connections determined a linear system. Picard's iterations not activated!"
            )

        for modelIn in self.modelsDictionary.keys():
            # in case there are metadataToTransfer, let's check if the source model is executed before the one that requests info
            if self.modelsDictionary[modelIn]['metadataToTransfer']:
                indexModelIn = self.orderList.index(modelIn)
                for metadataToGet, source, _ in self.modelsDictionary[modelIn][
                        'metadataToTransfer']:
                    if self.orderList.index(source) >= indexModelIn:
                        self.raiseAnError(
                            IOError, 'In model "' + modelIn +
                            '" the "metadataToTransfer" named "' +
                            metadataToGet + '" is linked to the source"' +
                            source +
                            '" that will be executed after this model.')
        self.needToCheckInputs = True
        # write debug statements
        self.raiseADebug(
            "Specs of Graph Network represented by EnsembleModel:")
        self.raiseADebug("Graph Degree Sequence is    : " +
                         str(self.ensembleModelGraph.degreeSequence()))
        self.raiseADebug("Graph Minimum/Maximum degree: " +
                         str((self.ensembleModelGraph.minDelta(),
                              self.ensembleModelGraph.maxDelta())))
        self.raiseADebug("Graph density/diameter      : " +
                         str((self.ensembleModelGraph.density(),
                              self.ensembleModelGraph.diameter())))
Example #3
0
def _createEvalProcess(components, variables):
  """
    Sorts the cashflow evaluation process so sensible evaluation order is used
    @ In, components, list, list of CashFlows.Component instances
    @ In, variables, dict, variable-value map from RAVEN
    @ Out, ordered, list, list of ordered cashflows to evaluate (in order)
  """
  # TODO does this work with float drivers (e.g. already-evaluated drivers)?
  # storage for creating graph sequence
  driverGraph = defaultdict(list)
  driverGraph['EndNode'] = []
  evaluated = [] # for cashflows that have already been evaluated and don't need more treatment
  for comp in components:
    lifetime = comp.getLifetime()
    # find multiplier variables
    multipliers = comp.getMultipliers()
    for mult in multipliers:
      if mult is None:
        continue
      if mult not in variables.keys():
        raise RuntimeError('CashFlow: multiplier "{}" required for Component "{}" but not found among variables!'.format(mult, comp.name))
    # find order in which to evaluate cash flow components
    for c, cf in enumerate(comp.getCashflows()):
      # keys for graph are drivers, cash flow names
      driver = cf.getParam('driver')
      # does the driver come from the variable list, or from another cashflow, or is it already evaluated?
      cfn = '{}|{}'.format(comp.name, cf.name)
      found = False
      if driver is None or utils.isAFloatOrInt(driver) or isinstance(driver, np.ndarray):
        found = True
        # TODO assert it's already filled?
        evaluated.append(cfn)
        continue
      elif driver in variables:
        found = True
        # check length of driver
        n = len(np.atleast_1d(variables[driver]))
        if n > 1 and n != lifetime+1:
          raise RuntimeError(('Component "{c}" TEAL {cf} driver variable "{d}" has "{n}" entries, '+\
                              'but "{c}" has a lifetime of {el}!')
                             .format(c=comp.name,
                                     cf=cf.name,
                                     d=driver,
                                     n=n,
                                     el=lifetime))
      else:
        # driver should be in cash flows if not in variables
        driverComp, driverCf = driver.split('|')
        for matchComp in components:
          if matchComp.name == driverComp:
            # for cross-referencing, component lifetimes have to be the same!
            if matchComp.getLifetime() != comp.getLifetime():
              raise RuntimeError(('Lifetimes for Component "{d}" and cross-referenced Component {m} ' +\
                                  'do not match, so no cross-reference possible!')
                                 .format(d=driverComp, m=matchComp.name))
            found = True # here this means that so far the component was found, not the specific cash flow.
            break
        else:
          found = False
        # if the component was found, check the cash flow is part of the component
        if found:
          if driverCf not in list(m_cf.name for m_cf in matchComp.getCashflows()):
            found = False
      if not found:
        raise RuntimeError(('Component "{c}" TEAL {cf} driver variable "{d}" was not found ' +\
                            'among variables or other cashflows!')
                           .format(c=comp.name,
                                   cf=cf.name,
                                   d=driver))

      # assure each cashflow is in the mix, and has an EndNode to rely on (helps graph construct accurately)
      driverGraph[cfn].append('EndNode')
      # each driver depends on its cashflow
      driverGraph[driver].append(cfn)
  return evaluated + graphObject(driverGraph).createSingleListOfVertices()