def initialize(self, runInfo, oriInputFiles):
  """
    Method to initialize the run of a new step
    @ In, runInfo, dict, dictionary of the info in the <RunInfo> XML block
    @ In, oriInputFiles, list, list of the original input files
    @ Out, None
  """
  import RAVENparser
  index = self.__findInputFile(oriInputFiles)
  parser = RAVENparser.RAVENparser(oriInputFiles[index].getAbsFile())
  # get the OutStreams names and the output Databases
  self.outStreamsNamesAndType, self.outDatabases = parser.returnOutputs()
  # check if the linked DataObjects are among the OutStreams
  if self.linkedDataObjectOutStreamsNames:
    pointSetNumber, historySetNumber = 0, 0
    for outstream, dataObj in self.outStreamsNamesAndType.items():
      if outstream in self.linkedDataObjectOutStreamsNames:
        if dataObj[1].strip() == 'PointSet':
          pointSetNumber += 1
        else:
          historySetNumber += 1
    if pointSetNumber > 1 or historySetNumber > 1:
      raise IOError(self.printTag + ' ERROR: Only one OutStream for PointSet and/or one for HistorySet can be linked as output export!')
    if pointSetNumber == 0 and historySetNumber == 0:
      raise IOError(self.printTag + ' ERROR: None of the OutStreams linked to this interface were found in the inner RAVEN!'
                    + ' Expected: "' + ' '.join(self.linkedDataObjectOutStreamsNames)
                    + '" but found "' + ' '.join(self.outStreamsNamesAndType.keys()) + '"!')
  else: # self.linkedDatabaseName
    for dbName, dbXml in self.outDatabases.items():
      if dbName == self.linkedDatabaseName:
        break
    else:
      # the requested database was not found
      raise IOError(f'{self.printTag} ERROR: The Database named "{self.linkedDatabaseName}" listed '
                    + 'in <outputDatabase> was not found among the written Databases in active Steps in the inner RAVEN! '
                    + f'Found: {list(self.outDatabases.keys())}')
  # get variable groups and store them globally
  self.variableGroups = parser.returnVarGroups()
  # get the inner working directory
  self.innerWorkingDir = parser.workingDir
  # check the operating system; on Windows, prefix the command with bash.exe if available
  if platform.startswith("win") and utils.which("bash.exe") is not None:
    self.preCommand = 'bash.exe'
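# For reference, a minimal sketch of the inner-RAVEN XML that initialize()
# checks (names such as "outData", "outDB", and "resultsPointSet" are
# illustrative only): the names listed in this interface's
# <outputExportOutStreams> must match OutStream entries, or the name in
# <outputDatabase> must match a Database entry, e.g.:
#
#   <OutStreams>
#     <Print name="outData">
#       <type>csv</type>
#       <source>resultsPointSet</source>
#     </Print>
#   </OutStreams>
#   <Databases>
#     <HDF5 name="outDB" readMode="overwrite"/>
#   </Databases>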
def createNewInput(self, currentInputFiles, oriInputFiles, samplerType, **Kwargs):
  """
    Generates a new input file depending on which sampler has been chosen.
    @ In, currentInputFiles, list, list of current input files (input files from the last call of this method)
    @ In, oriInputFiles, list, list of the original input files
    @ In, samplerType, string, Sampler type (e.g., MonteCarlo, Adaptive, etc.; see the Samplers section of the manual)
    @ In, Kwargs, dict, kwarded dictionary of parameters. This dictionary contains another dictionary
          called "SampledVars" where RAVEN stores the sampled variables
          (e.g. Kwargs['SampledVars'] => {'var1': 10, 'var2': 40})
    @ Out, currentInputFiles, list, list of the new input files (modified and not)
  """
  import RAVENparser
  if 'dynamiceventtree' in str(samplerType).strip().lower():
    raise IOError(self.printTag + ' ERROR: DynamicEventTree-based sampling not supported!')
  index = self.__findInputFile(currentInputFiles)
  parser = RAVENparser.RAVENparser(currentInputFiles[index].getAbsFile())
  # get sampled variables
  modifDict = Kwargs['SampledVars']
  # apply conversion scripts
  for source, convDict in self.conversionDict.items():
    module = utils.importFromPath(source)
    varVals = dict((var, np.asarray(modifDict[var])) for var in convDict['variables'])
    # modify non-scalar (vector) variables that need to be flattened
    if convDict['noScalar']:
      # call conversion
      newVars = module.convertNotScalarSampledVariables(varVals)
      # check type
      if not isinstance(newVars, dict):
        raise IOError(self.printTag + ' ERROR: convertNotScalarSampledVariables in "{}" must return a dictionary!'.format(source))
      # apply new and/or updated values
      modifDict.update(newVars)
    # modify scalar variables
    if convDict['scalar']:
      # call conversion; value changes happen in-place
      module.manipulateScalarSampledVariables(modifDict)
  # we work on batchSizes here
  newBatchSize = Kwargs['NumMPI']
  internalParallel = Kwargs.get('internalParallel', False)
  if int(Kwargs['numberNodes']) > 0:
    # we are on a distributed-memory machine => we allocate a node file
    nodeFileToUse = os.path.join(Kwargs['BASE_WORKING_DIR'], "node_" + str(Kwargs['INDEX']))
    if os.path.exists(nodeFileToUse):
      # keys use RAVEN's pipe-delimited "Node|SubNode" convention to address nested XML entries
      modifDict['RunInfo|mode'] = 'mpi'
      modifDict['RunInfo|mode|nodefile'] = nodeFileToUse
    else:
      raise IOError(self.printTag + ' ERROR: The nodefile "' + str(nodeFileToUse) + '" does not exist!')
  if internalParallel or newBatchSize > 1:
    # either internal parallelism is requested or NumMPI > 1
    modifDict['RunInfo|batchSize'] = newBatchSize
  #modifDict['RunInfo|internalParallel'] = internalParallel
  # make tree
  modifiedRoot = parser.modifyOrAdd(modifDict, save=True, allowAdd=True)
  # modify tree
  if self.inputManipulationModule is not None:
    module = utils.importFromPath(self.inputManipulationModule)
    modifiedRoot = module.modifyInput(modifiedRoot, modifDict)
  # write input file
  parser.printInput(modifiedRoot, currentInputFiles[index].getAbsFile())
  # copy slave files needed by the inner RAVEN input
  parser.copySlaveFiles(currentInputFiles[index].getPath())
  return currentInputFiles
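# The conversion scripts looped over in createNewInput() are user-supplied
# Python modules exposing one or both of the hooks checked above. A minimal
# sketch, with hypothetical variable names ('flux', 'pressure'):
#
#   import numpy as np
#
#   def convertNotScalarSampledVariables(varVals):
#     """
#       Flatten non-scalar samples into scalar entries; must return a dict.
#     """
#     # e.g. expand the vector 'flux' into flux_0, flux_1, ...
#     return {'flux_{}'.format(i): v for i, v in enumerate(varVals['flux'])}
#
#   def manipulateScalarSampledVariables(sampledVars):
#     """
#       Adjust scalar samples in-place; the return value is not used.
#     """
#     sampledVars['pressure'] = sampledVars['pressure'] * 1.0e6  # MPa -> Pa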