def __init__(self):
  """
    Constructor.
    @ In, None
    @ Out, None
  """
  BaseType.__init__(self)
  self.name = 'DataObject'
  self.printTag = self.name
  self._sampleTag = 'RAVEN_sample_ID'  # column name used to track samples
  # RAVEN-reserved variable names; should not be available to the user as variable names
  self.protectedTags = ['RAVEN_parentID', 'RAVEN_isEnding']
  # variable-name bookkeeping
  self._inputs = []        # list(str) of input variables
  self._outputs = []       # list(str) of output variables
  self._metavars = []      # list(str) of POINTWISE metadata variables
  self._orderedVars = []   # list(str) of variables IN ORDER of their index
  # metadata and history-selection options
  self._meta = {}            # collects meta until data is collapsed
  self._selectInput = None   # if not None, describes how to collect input data from history
  self._selectOutput = None  # if not None, describes how to collect output data from history
  # dimensionality maps
  self._pivotParams = {}     # independent dimensions as keys; values are the vars that depend on them
  self._fromVarToIndex = {}  # mapping {var: index}; "index" means a dimensional variable (e.g. time, x, y, z, etc.)
  self._aliases = {}         # variable aliases
  # underlying storage and caches
  self._data = None          # underlying data structure
  self._collector = None     # object used to collect samples
  self._inputKDTree = None   # for finding outputs given inputs (pointset only?)
  self._scaleFactors = None  # scaling factors for inputs as {var: (mean, scale)}
  self.hierarchical = False  # controls the printing/plotting of the dataobject
def __init__(self):
  """
    Init of Base class
    @ In, None
    @ Out, None
  """
  BaseType.__init__(self)
  # Use the class name as the type so that subclasses automatically report the
  # correct value.  Ideally the class name would simply be referenced wherever
  # this is used; failing that, this assignment should at least propagate
  # itself up the hierarchy.
  self.type = self.__class__.__name__
  self.options = {}                  # outstreaming options
  self.counter = 0                   # counter
  self.overwrite = True              # overwrite the outstream?
  self.availableOutStreamType = []   # outstream types available
  self.numberAggregatedOS = 1        # number of aggregated outstreams
  self.subDirectory = None           # optional sub-directory for printing and plotting
  self.printTag = 'OUTSTREAM MANAGER'
  self.filename = ''
def __init__(self):
  """
    Default Constructor that will initialize member variables with reasonable
    defaults or empty lists/dictionaries where applicable.
    @ In, None
    @ Out, None
  """
  BaseType.__init__(self)
  Assembler.__init__(self)
  # counters and limits
  self.counter = 0         # counter of samples performed (better: inputs generated!); reset by calling initialize
  self.auxcnt = 0          # auxiliary counter of samples performed (for usage, check the initialize method)
  self.limit = sys.maxsize # maximum number of samples (e.g. Monte Carlo = number of HistorySet to run, DET = unlimited)
  # sampling maps
  self.toBeSampled = {}     # sampling mapping dictionary {'variable name': 'name of the distribution'}
  self.dependentSample = {} # sampling mapping for dependent variables {'variable name': 'name of the external function'}
  self.distDict = {}        # distribution instances to use, rebuilt each time the sampler is initialized; keys are variable names
  self.funcDict = {}        # function instances to use, rebuilt each time the sampler is initialized; keys are variable names
  # per-sample state
  self.values = {}          # current value of each variable {'var name': value}
  self.inputInfo = {}       # keyworded information, varying by sampler; only the entries below are mandatory
  self.initSeed = None      # if not provided, the seed is randomly generated at sampler instantiation; a step can override it by sending in another seed
  self.inputInfo['SampledVars'] = self.values # where to get the values of the sampled variables
  self.inputInfo['SampledVarsPb'] = {}        # where to get the probability of the sampled variables
  self.inputInfo['PointProbability'] = None   # where the point-wise probability is stored (probability associated to a sampled point)
  self.inputInfo['crowDist'] = {}             # information needed to create a crow distribution, stored as a json object
  self.constants = {}                         # constants dictionary
  self.reseedAtEachIteration = False          # True if every new evaluation is performed after a new reseeding
  self.FIXME = False                          # FIXME flag
  self.printTag = self.type                   # prefix for all prints (sampler type)
  # restart handling
  self.restartData = None        # presampled points to restart from
  self.restartTolerance = 1e-15  # strictness with which to find matches in the restart data
  self._endJobRunnable = sys.maxsize # max number of inputs creatable by the sampler right after a job ends (e.g. infinite for MC, 1 for Adaptive, etc.)
  ######
  # variable/distribution dimensionality bookkeeping
  self.variables2distributionsMapping = {} # per variable 'varName': {'dim': 1, 'reducedDim': 1, 'totDim': 2, 'name': 'distName'};
                                           # dim = dimension of the variable; reducedDim = its dimension in the transformed space;
                                           # totDim = total dimensionality of its associated distribution
  self.distributions2variablesMapping = {} # per distribution 'distName': [{'var1': 1}, {'var2': 2}], giving each var's dimension
  self.NDSamplingParams = {}               # per ND distribution (key), the initialization parameters of the ND inverseCDF
                                           # ('initialGridDisc' and 'tolerance')
  ######
  self.addAssemblerObject('Restart', '-n', True)
  # used for PCA analysis
  self.variablesTransformationDict = {} # per 'modelName': {latentVariables: [latentVar1, ...], manifestVariables: [manifestVar1, ...]}
  self.transformationMethod = {}        # transformation method used in variablesTransformation node {'modelName': method}
  self.entitiesToRemove = []            # used to make sure the transformation info is printed once in the output xml file
def __init__(self):
  """
    This is the basic method initialize the metric object
    @ In, none
    @ Out, none
  """
  BaseType.__init__(self)
  # type and name both mirror the concrete class, so subclasses report correctly
  self.type = self.__class__.__name__
  self.name = self.__class__.__name__
def __init__(self):
  """
    This is the basic method initialize the metric object
    @ In, none
    @ Out, none
  """
  BaseType.__init__(self)
  # type and name both mirror the concrete class, so subclasses report correctly
  self.type = self.__class__.__name__
  self.name = self.__class__.__name__
  # If True, the metric must be able to handle (value, probability) pairs,
  # where value and probability are lists
  self.acceptsProbability = False
  # If True, the metric must be able to handle a passed-in Distribution
  self.acceptsDistribution = False
def __init__(self):
  """
    Constructor
    @ In, None
    @ Out, None
  """
  # Base Class
  BaseType.__init__(self)
  self.database = None      # Database object
  self.databaseDir = ''     # database directory; default = working directory
  self.workingDir = ''      # working directory
  self.printTag = 'DATABASE' # for printing verbosity labels
  self.variables = None     # if not None, list of specific variables the user requested to store
def __init__(self):
  """
    Constructor
    @ In, None
    @ Out, None
  """
  BaseType.__init__(self)
  # path components: refer to the open file object while open, else None
  self.__file = None
  self.__path = ''           # file path
  self.__base = ''           # file base
  self.__ext = None          # file extension
  # hard link to a certain Code subtype (e.g. RELAP-7, MooseBasedApp, etc.)
  self.__linkedModel = None
  # the "type" from the input, labeling a file to a particular subcode in the code interface
  self.type = None
  # can a sampling strategy perturb this file?
  self.perturbable = False
def __init__(self):
  """
    Constructor
    @ In, None
    @ Out, None
  """
  BaseType.__init__(self)  # base class
  self.database = None     # Database object
  self.databaseDir = ''    # database directory; default = working directory
  self.workingDir = ''     # working directory
  self.printTag = 'DATABASE'
def __init__(self):
  """
    Default Constructor that will initialize member variables with reasonable
    defaults or empty lists/dictionaries where applicable.
    @ In, None
    @ Out, None
  """
  # FIXME: given the similarity of this class with the base sampler, we should merge this
  BaseType.__init__(self)
  Assembler.__init__(self)
  # counters used by base and derived classes
  self.counter = {
      'mdlEval': 0,    # number of model evaluations performed (better: inputs generated!); reset by calling initialize
      'varsUpdate': 0, # number of optimization iterations
  }
  # limits for each counter
  self.limit = {
      'mdlEval': sys.maxsize,    # maximum number of loss-function evaluations
      'varsUpdate': sys.maxsize, # maximum number of optimization iterations
  }
  self.initSeed = None  # seed for random number generators
  self.optVars = None   # decision variables for optimization
  # per-variable bounds/initial values for each decision variable
  self.optVarsInit = {
      'upperBound': {},  # upper bounds of each decision variable
      'lowerBound': {},  # lower bounds of each decision variable
      'initial': {},     # initial values of each decision variable
      'ranges': {},      # ranges (min and max) of each variable's domain
  }
  self.optVarsHist = {}             # history of normalized decision variables for each iteration
  self.nVar = 0                     # number of decision variables
  self.objVar = None                # objective variable to be optimized
  self.optType = None               # either maximize or minimize
  self.optTraj = None               # identifiers of parallel optimization trajectories
  self.thresholdTrajRemoval = None  # threshold used to determine convergence of parallel optimization trajectories
  self.paramDict = {}               # additional parameters for derived classes
  self.absConvergenceTol = 0.0      # convergence threshold (absolute value)
  self.relConvergenceTol = 1.e-3    # convergence threshold (relative value)
  self.solutionExport = None        # data used to export the solution (may be absent)
  self.values = {}                  # current value of each variable {'var name': value}
  self.inputInfo = {}               # keyworded information, varying by optimizer; only the entry below is mandatory
  self.inputInfo['SampledVars'] = self.values # where to get the values of the sampled variables
  self.constants = {}               # dictionary of constant variables
  self.FIXME = False                # FIXME flag
  self.printTag = self.type         # prefix for all prints (optimizer type)
  self._endJobRunnable = sys.maxsize # max number of inputs creatable by the optimizer right after a job ends
  self.constraintFunction = None    # external constraint function (may be absent)
  self.mdlEvalHist = None           # information on all model evaluations
  self.objSearchingROM = None       # ROM used internally for fast loss-function evaluation
  self.addAssemblerObject('Restart' ,'-n',True)
  self.addAssemblerObject('TargetEvaluation','1')
  self.addAssemblerObject('Function','-1')
def __init__(self,runInfoDict):
  """
    Constructor
    @ In, runInfoDict, dict, the dictionary containing the runInfo (read in the XML input file)
    @ Out, None
  """
  BaseType.__init__(self)
  Assembler.__init__(self)
  # If aliases are defined in the input, this maps variable names in the framework
  # to the names used for input generation:
  #   self.alias[framework variable name] = [input code name]
  # e.g. for a MooseBasedApp:
  #   self.alias['internal_variable_name'] = 'Material|Fuel|thermal_conductivity'
  self.alias = {'input':{},'output':{}}
  self.subType = ''
  self.runQueue = []
  self.printTag = 'MODEL'
  self.createWorkingDir = False
def __init__(self,runInfoDict):
  """
    Constructor
    @ In, runInfoDict, dict, the dictionary containing the runInfo (read in the XML input file)
    @ Out, None
  """
  BaseType.__init__(self)
  self.database = None   # Database object
  self.exist = False     # does it exist?
  self.built = False     # is it built?
  self.filename = ""     # filename
  self.workingDir = runInfoDict['WorkingDir']
  self.databaseDir = self.workingDir  # database directory; default = working directory
  self.printTag = 'DATABASE'          # for printing verbosity labels
  self.variables = None  # if not None, list of specific variables the user requested to store
def __init__(self,runInfoDict):
  """
    Constructor
    @ In, runInfoDict, dict, the dictionary containing the runInfo (read in the XML input file)
    @ Out, None
  """
  BaseType.__init__(self)
  self.workingDir = runInfoDict['WorkingDir']
  self.__functionFile = ''      # function file name
  self.__actionDictionary = {}  # action dictionary
  # which actions this function implements
  self.__actionImplemented = {'residuumSign':False,'supportBoundingTest':False,'residuum':False,'gradient':False}
  self.__inputVariables = []    # names of the variables given in the input (xml)
  # dispatch table from input data type to its ingestion routine
  self.__inputFromWhat = {}
  self.__inputFromWhat['dict'] = self.__inputFromDict
  #self.__inputFromWhat['Data'] = self.__inputFromData
  self.printTag = 'FUNCTIONS'
def getInputSpecification(cls):
  """
    Method to get a reference to a class that specifies the input data for class "cls".
    @ In, cls, the class for which we are retrieving the specification
    @ Out, inputSpecification, InputData.ParameterInput, class to use for specifying the input of cls.
  """
  # start from the base specification and extend it with the optional 'dir' parameter
  inputSpecification = BaseType.getInputSpecification()
  inputSpecification.addParam('dir', param_type=InputTypes.StringType, required=False)
  return inputSpecification
def __init__(self):
  """
    Constructor
    @ In, None
    @ Out, None
  """
  BaseType.__init__(self)
  # list of lists [[role played in the step, class type, specialization, global name (user assigned by the input)]]
  self.parList = []
  # waiting time before checking whether a run is finished
  self.sleepTime = 0.005
  # If a step possesses a re-seeding instruction, it will ask the sampler to re-seed accordingly:
  #   re-seeding = a number       -> used as a new seed
  #   re-seeding = 'continue'     -> keep using the already-present random environment
  # With no instruction (self.initSeed = None) the sampler will reinitialize.
  self.initSeed = None
  self._knownAttribute += ['sleepTime', 're-seeding', 'pauseAtEnd', 'fromDirectory']
  self._excludeFromModelValidation = ['SolutionExport']
  self.printTag = 'STEPS'