Example #1
File: PointSet.py  Project: mattdon/raven
    def _specializedLoadXMLandCSV(self, filenameRoot, options):
        """
      Function to load the XML companion file of the CSV data file
      (it contains metadata, etc.). It must be implemented by the specialized classes
      @ In, filenameRoot, string, file name root
      @ In, options, dict, dictionary -> options for loading
      @ Out, None
    """
        #For a PointSet there are an XML file and one CSV file.
        #The CSV file has a header with the input and output names,
        #followed by lines of numeric input and output values,
        #one line per realization.
        if options is not None and 'fileToLoad' in options.keys():
            name = os.path.join(options['fileToLoad'].getPath(),
                                options['fileToLoad'].getBase())
        else:
            name = self.name

        filenameLocal = os.path.join(filenameRoot, name)

        if os.path.isfile(filenameLocal + '.xml'):
            xmlData = self._loadXMLFile(filenameLocal)
            assert (xmlData["fileType"] == "Pointset")
            if "metadata" in xmlData:
                self._dataContainer['metadata'] = xmlData["metadata"]

            mainCSV = os.path.join(filenameRoot, xmlData["filenameCSV"])
        else:
            mainCSV = os.path.join(filenameRoot, name + '.csv')

        myFile = open(mainCSV, "r")  # universal newlines are the default in Python 3
        header = myFile.readline().rstrip()
        inoutKeys = header.split(",")
        inoutValues = [[] for _ in range(len(inoutKeys))]

        for line in myFile:
            lineList = line.rstrip().split(",")
            for i in range(len(inoutKeys)):
                inoutValues[i].append(utils.partialEval(lineList[i]))
        myFile.close()

        #extend the expected size of this point set
        self.numAdditionalLoadPoints = len(
            inoutValues[0])  #used in checkConsistency
        self._dataContainer['inputs'] = {}
        self._dataContainer['outputs'] = {}
        inoutDict = {}
        for key, value in zip(inoutKeys, inoutValues):
            inoutDict[key] = value

        for key in self.getParaKeys('inputs'):
            self._dataContainer["inputs"][key] = c1darray(
                values=np.array(inoutDict[key]))

        for key in self.getParaKeys('outputs'):
            self._dataContainer["outputs"][key] = c1darray(
                values=np.array(inoutDict[key]))
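
For reference, a minimal sketch (hypothetical file name and values) of the flat CSV layout this loader expects, parsed column-wise in the same fashion as the loop above:

# Hypothetical PointSet CSV: a header of variable names, then one numeric row
# per realization, e.g.
#   x1,x2,ans
#   0.5,0.7,1.2
#   0.1,0.9,1.0
with open('myPointSet.csv') as f:  # hypothetical file name
    keys = f.readline().rstrip().split(',')
    columns = {key: [] for key in keys}
    for line in f:
        for key, token in zip(keys, line.rstrip().split(',')):
            # utils.partialEval would also accept non-numeric tokens
            columns[key].append(float(token))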
Example #2
    def _updateSpecializedMetadata(self, name, value, options=None):
        """
      This function updates the values (metadata) stored in this Data object
      @ In,  name, string, parameter name (ex. probability)
      @ In,  value, any type, newer value
      @ Out, None
      NB. If the metadata name is already present, the new value is appended to the existing entry; since the metadata are inhomogeneous, a single common updating strategy is not feasible.
    """
        valueType = None if utils.checkTypeRecursively(value) not in [
            'str', 'unicode', 'bytes'
        ] else object

        if options and self._dataParameters['hierarchical']:
            # we retrieve the node in which the specialized 'Point' has been stored
            parentID = None
            if 'metadata' in options.keys():
                prefix = options['metadata']['prefix']
                if 'parentID' in options['metadata'].keys():
                    parentID = options['metadata']['parentID']
            else:
                prefix = options['prefix']
                if 'parentID' in options.keys():
                    parentID = options['parentID']
            if parentID:
                tsnode = self.retrieveNodeInTreeMode(prefix, parentID)
            else:
                tsnode = self.retrieveNodeInTreeMode(prefix)
            self._dataContainer = tsnode.get('dataContainer')
            if not self._dataContainer:
                tsnode.add('dataContainer', {'metadata': {}})
                self._dataContainer = tsnode.get('dataContainer')
            else:
                if 'metadata' not in self._dataContainer.keys():
                    self._dataContainer['metadata'] = {}
            if name in self._dataContainer['metadata'].keys():
                self._dataContainer['metadata'][name].append(
                    np.atleast_1d(value))
            else:
                valueToAdd = np.array(
                    value, dtype=valueType
                ) if valueType is not None else np.array(value)
                self._dataContainer['metadata'][name] = c1darray(
                    values=np.atleast_1d(valueToAdd))
            self.addNodeInTreeMode(tsnode, options)
        else:
            if name in self._dataContainer['metadata'].keys():
                self._dataContainer['metadata'][name].append(
                    np.atleast_1d(value))
            else:
                valueToAdd = np.array(
                    value, dtype=valueType
                ) if valueType is not None else np.array(value)
                self._dataContainer['metadata'][name] = c1darray(
                    values=np.atleast_1d(valueToAdd))
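
The valueType switch above matters because NumPy treats strings and generic objects differently; a short illustration of the standard NumPy behavior it relies on:

import numpy as np

# Strings coerce to fixed-width unicode arrays unless dtype=object is forced.
print(np.array(['a', 'longer']).dtype)                # <U6: fixed-width unicode
print(np.array(['a', 'longer'], dtype=object).dtype)  # object: arbitrary Python values
print(np.array([1.0, 2.0]).dtype)                     # float64: numbers need no override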
Example #3
File: PointSet.py  Project: mattdon/raven
    def _updateSpecializedOutputValue(self, name, value, options=None):
        """
      This function updates the values (output space) stored in this Data object
      @ In,  name, string, parameter name (ex. cladTemperature)
      @ In,  value, float, newer value (single value)
      @ Out, None
    """
        if options and self._dataParameters['hierarchical']:
            # we retrieve the node in which the specialized 'Point' has been stored
            parentID = None
            if 'metadata' in options.keys():
                prefix = options['metadata']['prefix']
                if 'parentID' in options['metadata'].keys():
                    parentID = options['metadata']['parentID']
            else:
                prefix = options['prefix']
                if 'parentID' in options.keys():
                    parentID = options['parentID']
            if parentID:
                tsnode = self.retrieveNodeInTreeMode(prefix, parentID)
            else:
                tsnode = self.retrieveNodeInTreeMode(prefix)
            # we store the pointer to the container in self._dataContainer because checkConsistency acts on it
            self._dataContainer = tsnode.get('dataContainer')
            if not self._dataContainer:
                tsnode.add('dataContainer', {'inputs': {}, 'outputs': {}})
                self._dataContainer = tsnode.get('dataContainer')
            if name in self._dataContainer['outputs'].keys():
                self._dataContainer['outputs'].pop(name)
            if name not in self._dataParameters['outParam']:
                self._dataParameters['outParam'].append(name)
            self._dataContainer['outputs'][name] = c1darray(
                values=np.atleast_1d(value))
            self.addNodeInTreeMode(tsnode, options)
        else:
            if name in self._dataContainer['outputs'].keys():
                self._dataContainer['outputs'][name].append(
                    np.atleast_1d(value)[-1])
            else:
                if name not in self._dataParameters['outParam']:
                    self._dataParameters['outParam'].append(name)
                self._dataContainer['outputs'][name] = c1darray(
                    values=np.atleast_1d(np.atleast_1d(value)[-1]))
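
The np.atleast_1d(value)[-1] idiom in the non-hierarchical branch keeps only the last entry when a whole vector is passed in, matching PointSet's one-value-per-realization contract; in isolation (illustrative values):

import numpy as np

print(np.atleast_1d(3.14)[-1])             # 3.14: a scalar passes through unchanged
print(np.atleast_1d([1.0, 2.0, 3.0])[-1])  # 3.0: only the final entry of a history is kept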
Example #4
File: PointSet.py  Project: since801/raven
    def _specializedLoadXMLandCSV(self, filenameRoot, options):
        """
      Function to load the XML companion file of the CSV data file
      (it contains metadata, etc.). It must be implemented by the specialized classes
      @ In, filenameRoot, string, file name root
      @ In, options, dict, dictionary -> options for loading
      @ Out, None
    """
        #For a PointSet there are an XML file and one CSV file.
        #The CSV file has a header with the input and output names,
        #followed by lines of numeric input and output values,
        #one line per realization.
        if options is not None and 'fileToLoad' in options.keys():
            name = os.path.join(options['fileToLoad'].getPath(),
                                options['fileToLoad'].getBase())
        else:
            name = self.name

        filenameLocal = os.path.join(filenameRoot, name)

        if os.path.isfile(filenameLocal + '.xml'):
            xmlData = self._loadXMLFile(filenameLocal)
            assert (xmlData["fileType"] == "Pointset")
            if "metadata" in xmlData:
                self._dataContainer['metadata'] = xmlData["metadata"]

            mainCSV = os.path.join(filenameRoot, xmlData["filenameCSV"])
        else:
            mainCSV = os.path.join(filenameRoot, name + '.csv')

        myFile = open(mainCSV, "r")  # universal newlines are the default in Python 3
        header = myFile.readline().rstrip()
        inoutKeys = header.split(",")
        inoutValues = [[] for _ in range(len(inoutKeys))]

        for lineNumber, line in enumerate(myFile.readlines(), 2):
            lineList = line.rstrip().split(",")
            for i in range(len(inoutKeys)):
                datum = utils.partialEval(lineList[i])
                if datum == '':
                    self.raiseAnError(
                        IOError,
                        'Invalid data in input file: {} at line {}: "{}"'.
                        format(filenameLocal, lineNumber, line.rstrip()))
                inoutValues[i].append(datum)
        myFile.close()

        #extend the expected size of this point set
        self.numAdditionalLoadPoints += len(
            inoutValues[0])  #used in checkConsistency

        ## Do not reset these containers because it will wipe whatever information
        ## already exists in this PointSet. This is not one of the use cases for our
        ## data objects. We claim in the manual to construct or update information.
        ## These should be non-destructive operations. -- DPM 6/26/17
        # self._dataContainer['inputs'] = {}
        # self._dataContainer['outputs'] = {}
        inoutDict = {}
        for key, value in zip(inoutKeys, inoutValues):
            inoutDict[key] = value

        for key in self.getParaKeys('inputs'):
            ## Again, in order to be non-destructive we should only initialize on the
            ## first go-around, subsequent loads should append to the existing list.
            ## -- DPM 6/26/17
            if key not in self._dataContainer["inputs"]:
                self._dataContainer["inputs"][key] = c1darray(
                    values=np.array(inoutDict[key]))
            else:
                self._dataContainer["inputs"][key].append(
                    c1darray(values=np.array(inoutDict[key])))

        for key in self.getParaKeys('outputs'):
            ## Again, in order to be non-destructive we should only initialize on the
            ## first go-around, subsequent loads should append to the existing list.
            ## -- DPM 6/26/17
            if key not in self._dataContainer["outputs"]:
                self._dataContainer["outputs"][key] = c1darray(
                    values=np.array(inoutDict[key]))
            else:
                self._dataContainer["outputs"][key].append(
                    c1darray(values=np.array(inoutDict[key])))
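
The initialize-or-append pattern above can be summarized with a plain dict of lists standing in for the data container (names and values are illustrative):

container = {}

def load(container, key, newValues):
    # First load initializes the column; later loads append, never overwrite.
    if key not in container:
        container[key] = list(newValues)
    else:
        container[key].extend(newValues)

load(container, 'x1', [0.5, 0.1])
load(container, 'x1', [0.9])
print(container['x1'])  # [0.5, 0.1, 0.9]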
Example #5
File: PointSet.py  Project: since801/raven
    def _updateSpecializedOutputValue(self, name, value, options=None):
        """
      This function updates the values (output space) stored in this Data object
      @ In,  name, string, parameter name (ex. cladTemperature)
      @ In,  value, float, newer value (single value)
      @ Out, None
    """
        ## You are trying to update a single data point but passed in more
        ## information than needed, so we reduce it down using one of our
        ## recipes.
        value = np.atleast_1d(value).flatten()
        if len(value) > 1:

            if options is None:
                outputRow = -1
                outputPivotVal = None
                operator = None
            else:
                ## Not sure if any of these are necessary, but I am trying to replicate
                ## the magic that takes place in the CsvLoader -- DPM 5/3/2017
                outputRow = copy.deepcopy(options.get('outputRow', -1))
                outputPivotVal = options.get('outputPivotValue', None)
                operator = options.get('operator', None)

            if outputRow is None:
                if outputPivotVal is not None and 'end' in outputPivotVal:
                    outputRow = -1
                # elif outputPivotVal != None:
                #   outputPivotVal = float(outputPivotVal)

            if operator == 'max':
                value = np.max(value)
            elif operator == 'min':
                value = np.min(value)
            elif operator == 'average':
                value = np.average(value)
            else:  #elif outputRow is not None:
                value = value[outputRow]
            ## We don't have access to the pivot parameter's information at this
            ## point, so I will forego this implementation for now -- DPM 5/3/2017
            #else:
            #  value = interp1d(data[:,pivotIndex], value, kind='linear')(outputPivotVal)

        if options and self._dataParameters['hierarchical']:
            # we retrieve the node in which the specialized 'Point' has been stored
            parentID = None
            if 'metadata' in options.keys():
                prefix = options['metadata']['prefix']
                if 'parentID' in options['metadata'].keys():
                    parentID = options['metadata']['parentID']
            else:
                prefix = options['prefix']
                if 'parentID' in options.keys():
                    parentID = options['parentID']
            if parentID:
                tsnode = self.retrieveNodeInTreeMode(prefix, parentID)
            else:
                tsnode = self.retrieveNodeInTreeMode(prefix)
            # we store the pointer to the container in self._dataContainer because checkConsistency acts on it
            self._dataContainer = tsnode.get('dataContainer')
            if not self._dataContainer:
                tsnode.add('dataContainer', {'inputs': {}, 'outputs': {}})
                self._dataContainer = tsnode.get('dataContainer')
            if name in self._dataContainer['outputs'].keys():
                self._dataContainer['outputs'].pop(name)
            if name not in self._dataParameters['outParam']:
                self._dataParameters['outParam'].append(name)
            self._dataContainer['outputs'][name] = c1darray(
                values=np.atleast_1d(value))
            self.addNodeInTreeMode(tsnode, options)
        else:
            if name in self._dataContainer['outputs'].keys():
                self._dataContainer['outputs'][name].append(
                    np.atleast_1d(value)[-1])
            else:
                if name not in self._dataParameters['outParam']:
                    self._dataParameters['outParam'].append(name)
                self._dataContainer['outputs'][name] = c1darray(
                    values=np.atleast_1d(np.atleast_1d(value)[-1]))
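
The reduction recipes from the branch above, shown in isolation (the 'operator' and 'outputRow' settings come from the options dict; data are illustrative):

import numpy as np

history = np.array([1.0, 4.0, 2.0])
print(np.max(history))      # operator == 'max'     -> 4.0
print(np.min(history))      # operator == 'min'     -> 1.0
print(np.average(history))  # operator == 'average' -> 2.333...
print(history[-1])          # default outputRow=-1  -> 2.0, the last entry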
Example #6
File: PointSet.py  Project: since801/raven
 def _updateSpecializedInputValue(self, name, value, options=None):
     """
   This function updates the values (input space) stored in this Data object
   @ In,  name, string, parameter name (ex. cladTemperature)
   @ In,  value, float, newer value (single value)
   @ Out, None
 """
      # inputs that arrive as 1-D arrays rather than scalars are routed to the
      # 'unstructuredInputs' container below
      unstructuredInput = False
     if isinstance(value, (np.ndarray, c1darray)):
         if np.asarray(value).ndim > 1 and max(
                 np.asarray(value).shape) != np.asarray(value).size:
             self.raiseAnError(
                 NotConsistentData,
                 'PointSet Data accepts only a 1 Dimensional numpy array or a single value for method <_updateSpecializedInputValue>. Array shape is '
                 + str(value.shape))
          unstructuredInput = value.size > 1
     if options and self._dataParameters['hierarchical']:
         # we retrieve the node in which the specialized 'Point' has been stored
         parentID = None
         if 'metadata' in options.keys():
             prefix = options['metadata']['prefix']
             if 'parentID' in options['metadata'].keys():
                 parentID = options['metadata']['parentID']
         else:
             prefix = options['prefix']
             if 'parentID' in options.keys():
                 parentID = options['parentID']
         if parentID:
             tsnode = self.retrieveNodeInTreeMode(prefix, parentID)
         else:
             tsnode = self.retrieveNodeInTreeMode(prefix)
         self._dataContainer = tsnode.get('dataContainer')
         if not self._dataContainer:
             tsnode.add('dataContainer', {
                 'inputs': {},
                 'unstructuredInputs': {},
                 'outputs': {}
             })
             self._dataContainer = tsnode.get('dataContainer')
         if name in self._dataContainer['inputs'].keys():
             self._dataContainer['inputs'].pop(name)
         if name in self._dataContainer['unstructuredInputs'].keys():
             self._dataContainer['unstructuredInputs'].pop(name)
         if name not in self._dataParameters['inParam']:
             self._dataParameters['inParam'].append(name)
         if not unstructuredInput:
             self._dataContainer['inputs'][name] = c1darray(
                 values=np.atleast_1d(np.ravel(value)[-1]))
         else:
             self._dataContainer['unstructuredInputs'][name] = [
                 c1darray(values=np.atleast_1d(np.ravel(value)))
             ]
         self.addNodeInTreeMode(tsnode, options)
     else:
         if name in itertools.chain(
                 self._dataContainer['inputs'].keys(),
                 self._dataContainer['unstructuredInputs'].keys()):
             if not unstructuredInput:
                 self._dataContainer['inputs'][name].append(
                     np.atleast_1d(np.ravel(value)[-1]))
             else:
                 self._dataContainer['unstructuredInputs'][name].append(
                     np.atleast_1d(np.ravel(value)))
         else:
             if name not in self._dataParameters['inParam']:
                 self._dataParameters['inParam'].append(name)
             if not unstructuredInput:
                 self._dataContainer['inputs'][name] = c1darray(
                     values=np.atleast_1d(np.ravel(value)[-1]))
             else:
                 self._dataContainer['unstructuredInputs'][name] = [
                     c1darray(values=np.atleast_1d(np.ravel(value)))
                 ]
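
The shape test above accepts arrays that are one-dimensional in substance: max(shape) == size holds exactly when every other axis has length 1. A quick check of which shapes pass (illustrative shapes):

import numpy as np

for shape in [(4,), (1, 4), (4, 1), (2, 2)]:
    a = np.zeros(shape)
    accepted = a.ndim <= 1 or max(a.shape) == a.size
    print(shape, 'accepted' if accepted else 'rejected')  # only (2, 2) is rejected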
Example #7
    def _specializedLoadXMLandCSV(self, filenameRoot, options):
        """
      Function to load the XML companion file of the CSV data file
      (it contains metadata, etc.). It must be implemented by the specialized classes
      @ In, filenameRoot, string, file name root
      @ In, options, dict, dictionary -> options for loading
      @ Out, None
    """
        #For a HistorySet there are an XML file and multiple CSV
        #files.  The first CSV file has a header with the input names
        #and a column of filenames.  There is one sub-CSV file for each
        #data line in the first CSV, named with that filename.  Each
        #sub-CSV has the output names for a header, a column for time,
        #and the rest of the file is data for the different times.
        if options is not None and 'fileToLoad' in options.keys():
            name = os.path.join(options['fileToLoad'].getPath(),
                                options['fileToLoad'].getBase())
        else:
            name = self.name

        filenameLocal = os.path.join(filenameRoot, name)

        if os.path.isfile(filenameLocal + '.xml'):
            xmlData = self._loadXMLFile(filenameLocal)
            assert (xmlData["fileType"] == "HistorySet")
            if "metadata" in xmlData:
                self._dataContainer['metadata'] = xmlData["metadata"]
            mainCSV = os.path.join(filenameRoot, xmlData["filenameCSV"])
        else:
            mainCSV = os.path.join(filenameRoot, name + '.csv')

        myFile = open(mainCSV, "r")  # universal newlines are the default in Python 3
        header = myFile.readline().rstrip()
        inpKeys = header.split(",")[:-1]
        inpValues = []
        outKeys = []
        outValues = []
        allLines = myFile.readlines()
        myFile.close()
        for mainLine in allLines:
            mainLineList = mainLine.rstrip().split(",")
            inpValues_h = [utils.partialEval(a) for a in mainLineList[:-1]]
            inpValues.append(inpValues_h)
            dataFilename = mainLineList[-1]
            subCSVFilename = os.path.join(filenameRoot, dataFilename)
            subCSVFile = Files.returnInstance("CSV", self)
            subCSVFile.setFilename(subCSVFilename)
            self._toLoadFromList.append(subCSVFile)
            with open(subCSVFilename, "r") as myDataFile:
                header = myDataFile.readline().rstrip()
                outKeys_h = header.split(",")
                outValues_h = [[] for a in range(len(outKeys_h))]
                for lineNumber, line in enumerate(myDataFile.readlines(), 2):
                    lineList = line.rstrip().split(",")
                    for i in range(len(outKeys_h)):
                        datum = utils.partialEval(lineList[i])
                        if datum == '':
                            self.raiseAnError(
                                IOError,
                                'Invalid data in input file: {} at line {}: "{}"'
                                .format(subCSVFilename, lineNumber,
                                        line.rstrip()))
                        outValues_h[i].append(datum)
            outKeys.append(outKeys_h)
            outValues.append(outValues_h)

        ## Do not reset these containers because it will wipe whatever information
        ## already exists in this HistorySet. This is not one of the use cases for
        ## our data objects. We claim in the manual to construct or update
        ## information. These should be non-destructive operations. -- DPM 6/26/17
        # self._dataContainer['inputs'] = {} #XXX these are indexed by 1,2,...
        # self._dataContainer['outputs'] = {} #XXX these are indexed by 1,2,...
        startKey = len(self._dataContainer['inputs'].keys())
        for i in range(len(inpValues)):
            mainKey = startKey + i + 1
            subInput = {}
            subOutput = {}
            for key, value in zip(inpKeys, inpValues[i]):
                if key in self.getParaKeys('inputs'):
                    subInput[key] = c1darray(values=np.array([value]))
            for key, value in zip(outKeys[i], outValues[i]):
                if key in self.getParaKeys('outputs'):
                    subOutput[key] = c1darray(values=np.array(value))

            self._dataContainer['inputs'][mainKey] = subInput
            self._dataContainer['outputs'][mainKey] = subOutput

        #extend the expected size of this point set
        self.numAdditionalLoadPoints += len(
            allLines)  #used in checkConsistency

        self.checkConsistency()
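
For reference, a minimal sketch (hypothetical file names and values) of the two-level layout this loader walks; the main CSV lists the inputs plus a filename column, and each sub-CSV holds one history:

# myHistorySet.csv (main CSV):
#   x1,x2,filename
#   0.5,0.7,myHistorySet_1.csv
#
# myHistorySet_1.csv (one sub-CSV per history):
#   time,cladTemperature
#   0.0,600.0
#   1.0,612.5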
Example #8
    def _updateSpecializedOutputValue(self, name, value, options=None):
        """
      This function updates the values (output space) stored in this Data object
      @ In,  name, either 1) list (size = 2): name[0] is the history number (ex. 1 or 2 etc.) and name[1] is the parameter name (ex. cladTemperature)
                       or 2) string, parameter name (ex. cladTemperature) -> in this second case the parameter is added to the last history (if not present),
                                                                             otherwise a new history is created and the new value is inserted in it
      @ In, value, ?, ?
      @ Out, None
    """
        if isinstance(value, np.ndarray):
            value = c1darray(values=value)
        if not isinstance(value, c1darray):
            self.raiseAnError(
                NotConsistentData,
                'HistorySet Data accepts only cached_ndarray as type for method <_updateSpecializedOutputValue>. Got '
                + str(type(value)))

        if options and self._dataParameters['hierarchical']:
            parentID = None
            if type(name) == list:
                namep = name[1]
                if type(name[0]) == str:
                    nodeId = name[0]
                else:
                    if 'metadata' in options.keys():
                        nodeId = options['metadata']['prefix']
                        if 'parentID' in options['metadata'].keys():
                            parentID = options['metadata']['parentID']
                    else:
                        nodeId = options['prefix']
                        if 'parentID' in options.keys():
                            parentID = options['parentID']
            else:
                if 'metadata' in options.keys():
                    nodeId = options['metadata']['prefix']
                    if 'parentID' in options['metadata'].keys():
                        parentID = options['metadata']['parentID']
                else:
                    nodeId = options['prefix']
                    if 'parentID' in options.keys():
                        parentID = options['parentID']
                namep = name
            if parentID:
                tsnode = self.retrieveNodeInTreeMode(nodeId, parentID)
            else:
                tsnode = self.retrieveNodeInTreeMode(nodeId)
            # we store the pointer to the container in self._dataContainer because checkConsistency acts on it
            self._dataContainer = tsnode.get('dataContainer')
            if not self._dataContainer:
                tsnode.add('dataContainer', {'inputs': {}, 'outputs': {}})
                self._dataContainer = tsnode.get('dataContainer')
            if namep in self._dataContainer['outputs'].keys():
                self._dataContainer['outputs'].pop(namep)
            if namep not in self._dataParameters['outParam']:
                self._dataParameters['outParam'].append(namep)
            self._dataContainer['outputs'][namep] = c1darray(
                values=np.atleast_1d(np.array(value, dtype=float)))
            self.addNodeInTreeMode(tsnode, options)
        else:
            resultsArray = c1darray(
                values=np.atleast_1d(np.array(value, dtype=float)))
            if type(name) == list:
                # there are info regarding the history number
                try:
                    self._dataContainer['outputs'][name[0]][
                        name[1]] = resultsArray
                except KeyError:
                    self._dataContainer['outputs'][name[0]] = {
                        name[1]: resultsArray
                    }
            else:
                # no info regarding the history number => use internal counter
                if len(self._dataContainer['outputs']) == 0:
                    self._dataContainer['outputs'][1] = {name: resultsArray}
                else:
                    hisn = max(self._dataContainer['outputs'].keys())
                    if name in list(
                            self._dataContainer['outputs'].values())[-1]:
                        hisn += 1
                        self._dataContainer['outputs'][hisn] = {}
                    self._dataContainer['outputs'][hisn][name] = copy.copy(
                        resultsArray
                    )  #FIXME why deepcopy here but not elsewhere?
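
The internal history counter in the else branch can be sketched with a plain dict standing in for the outputs container: a new history is opened only when the variable already exists in the latest one (stand-in names and values):

outputs = {}

def store(outputs, name, arr):
    if len(outputs) == 0:
        outputs[1] = {name: arr}
        return
    hisn = max(outputs.keys())
    if name in outputs[hisn]:  # same variable again -> start the next history
        hisn += 1
        outputs[hisn] = {}
    outputs[hisn][name] = arr

store(outputs, 'T', [600.0])
store(outputs, 'P', [1.0e5])
store(outputs, 'T', [610.0])
print(outputs)  # {1: {'T': [600.0], 'P': [100000.0]}, 2: {'T': [610.0]}}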
Example #9
 def _updateSpecializedInputValue(self, name, value, options=None):
     """
   This function updates the values (input space) stored in this Data object
   @ In,  name, either 1) list (size = 2): name[0] is the history number (ex. 1 or 2 etc.) and name[1] is the parameter name (ex. cladTemperature)
                    or 2) string, parameter name (ex. cladTemperature) -> in this second case the parameter is added to the last history (if not present),
                                                                          otherwise a new history is created and the new value is inserted in it
   @ In, value, newer value
   @ Out, None
 """
      # inputs that arrive as 1-D arrays rather than scalars are routed to the
      # 'unstructuredInputs' container below
     unstructuredInput = False
     if isinstance(value, np.ndarray):
          if value.shape == ():
              # can't cast a 0-d ndarray into a c1darray, so unwrap it into a scalar
              value = value.dtype.type(value)
         else:
             value = c1darray(values=value)
     if not isinstance(value, (float, int, bool, c1darray)):
         self.raiseAnError(
             NotConsistentData,
              'HistorySet Data accepts only a numpy array or a single value for method <_updateSpecializedInputValue>. Got type '
             + str(type(value)))
     if isinstance(value, c1darray):
         if np.asarray(value).ndim > 1 and max(
                 value.values.shape) != np.asarray(value).size:
             self.raiseAnError(
                 NotConsistentData,
                 'HistorySet Data accepts only a 1 Dimensional numpy array or a single value for method <_updateSpecializedInputValue>. Array shape is '
                 + str(value.shape))
          unstructuredInput = value.size > 1
     containerType = 'inputs' if not unstructuredInput else 'unstructuredInputs'
     if options and self._dataParameters['hierarchical']:
         # we retrieve the node in which the specialized 'History' has been stored
         parentID = None
         if type(name) == list:
             namep = name[1]
             if type(name[0]) == str:
                 nodeId = name[0]
             else:
                 if 'metadata' in options.keys():
                     nodeId = options['metadata']['prefix']
                     if 'parentID' in options['metadata'].keys():
                         parentID = options['metadata']['parentID']
                 else:
                     nodeId = options['prefix']
                     if 'parentID' in options.keys():
                         parentID = options['parentID']
         else:
             if 'metadata' in options.keys():
                 nodeId = options['metadata']['prefix']
                 if 'parentID' in options['metadata'].keys():
                     parentID = options['metadata']['parentID']
             else:
                 nodeId = options['prefix']
                 if 'parentID' in options.keys():
                     parentID = options['parentID']
             namep = name
         if parentID:
             tsnode = self.retrieveNodeInTreeMode(nodeId, parentID)
         else:
             tsnode = self.retrieveNodeInTreeMode(nodeId)
         self._dataContainer = tsnode.get('dataContainer')
         if not self._dataContainer:
             tsnode.add('dataContainer', {
                 'inputs': {},
                 'unstructuredInputs': {},
                 'outputs': {}
             })
             self._dataContainer = tsnode.get('dataContainer')
          if namep in self._dataContainer[containerType].keys():
              self._dataContainer[containerType].pop(namep)
         if namep not in self._dataParameters['inParam']:
             self._dataParameters['inParam'].append(namep)
          self._dataContainer[containerType][namep] = c1darray(
              values=np.ravel(value))
         self.addNodeInTreeMode(tsnode, options)
     else:
         if type(name) == list:
             # there are info regarding the history number
             if name[0] in self._dataContainer[containerType].keys():
                 gethistory = self._dataContainer[containerType].pop(
                     name[0])
                 gethistory[name[1]] = c1darray(
                     values=np.ravel(np.array(value, dtype=float)))
                 self._dataContainer[containerType][name[0]] = gethistory
             else:
                 self._dataContainer[containerType][name[0]] = {
                     name[1]:
                     c1darray(values=np.ravel(np.array(value, dtype=float)))
                 }
         else:
             # no info regarding the history number => use internal counter
             if len(self._dataContainer[containerType].keys()) == 0:
                 self._dataContainer[containerType][1] = {
                     name:
                     c1darray(values=np.ravel(np.array(value, dtype=float)))
                 }
             else:
                 hisn = max(self._dataContainer[containerType].keys())
                 if name in list(
                         self._dataContainer[containerType].values())[-1]:
                     hisn += 1
                     self._dataContainer[containerType][hisn] = {}
                  self._dataContainer[containerType][hisn][name] = c1darray(
                      values=np.ravel(np.array(value, dtype=float)))
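
The zero-dimensional special case near the top of this method, in isolation: a 0-d array cannot seed a c1darray, so it is unwrapped back into a NumPy scalar first (illustrative value):

import numpy as np

v = np.array(3.14)       # 0-d array: shape == ()
print(v.shape)           # ()
v = v.dtype.type(v)      # unwrap to np.float64(3.14), a scalar again
print(type(v).__name__)  # float64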
Example #10
  """
    if abs(value - expected) > tol:
        print("checking answer", comment, value, "!=", expected)
        if updateResults:
            results["fail"] += 1
        return False
    else:
        if updateResults:
            results["pass"] += 1
        return True


#establish test array
origin = np.array([-3.14, 2.99792, 2.718, 8.987, 0.618])
#test init
testArray = cached_ndarray.c1darray(values=origin)

#test iter, getitem
for i, val in enumerate(testArray):
    checkAnswer('content storage indexing', val, origin[i])

#test len
checkAnswer('array length', len(testArray), 5)

#test append single value
testArray.append(-6.626)
checkAnswer('append value', testArray[-1], -6.626)
#test append array
testArray.append(np.array([12.56, 6.67]))
checkAnswer('append array, 0', testArray[-2], 12.56)
checkAnswer('append array, 1', testArray[-1], 6.67)
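
A plain-list stand-in for the append semantics the test exercises (c1darray itself lives in RAVEN's cached_ndarray module): appending an array extends the storage element-wise rather than nesting it:

data = [-3.14, 2.99792, 2.718, 8.987, 0.618]
data.append(-6.626)         # single value -> one new entry
data.extend([12.56, 6.67])  # array append -> two new entries, as c1darray.append does
assert data[-2:] == [12.56, 6.67]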
Example #11
    def _specializedLoadXMLandCSV(self, filenameRoot, options):
        """
      Function to load the XML companion file of the CSV data file
      (it contains metadata, etc.). It must be implemented by the specialized classes
      @ In, filenameRoot, string, file name root
      @ In, options, dict, dictionary -> options for loading
      @ Out, None
    """
        #For a HistorySet there are an XML file and multiple CSV
        #files.  The first CSV file has a header with the input names
        #and a column of filenames.  There is one sub-CSV file for each
        #data line in the first CSV, named with that filename.  Each
        #sub-CSV has the output names for a header, a column for time,
        #and the rest of the file is data for the different times.
        if options is not None and 'fileToLoad' in options.keys():
            name = os.path.join(options['fileToLoad'].getPath(),
                                options['fileToLoad'].getBase())
        else:
            name = self.name

        filenameLocal = os.path.join(filenameRoot, name)

        if os.path.isfile(filenameLocal + '.xml'):
            xmlData = self._loadXMLFile(filenameLocal)
            assert (xmlData["fileType"] == "HistorySet")
            if "metadata" in xmlData:
                self._dataContainer['metadata'] = xmlData["metadata"]
            mainCSV = os.path.join(filenameRoot, xmlData["filenameCSV"])
        else:
            mainCSV = os.path.join(filenameRoot, name + '.csv')

        myFile = open(mainCSV, "r")  # universal newlines are the default in Python 3
        header = myFile.readline().rstrip()
        inpKeys = header.split(",")[:-1]
        inpValues = []
        outKeys = []
        outValues = []
        allLines = myFile.readlines()
        myFile.close()
        for mainLine in allLines:
            mainLineList = mainLine.rstrip().split(",")
            inpValues_h = [utils.partialEval(a) for a in mainLineList[:-1]]
            inpValues.append(inpValues_h)
            dataFilename = mainLineList[-1]
            subCSVFilename = os.path.join(filenameRoot, dataFilename)
            myDataFile = open(subCSVFilename, "r")  # universal newlines are the default in Python 3
            subCSVFile = Files.returnInstance("CSV", self)
            subCSVFile.setFilename(subCSVFilename)
            self._toLoadFromList.append(subCSVFile)
            header = myDataFile.readline().rstrip()
            outKeys_h = header.split(",")
            outValues_h = [[] for a in range(len(outKeys_h))]
            for line in myDataFile.readlines():
                lineList = line.rstrip().split(",")
                for i in range(len(outKeys_h)):
                    outValues_h[i].append(utils.partialEval(lineList[i]))
            myDataFile.close()
            outKeys.append(outKeys_h)
            outValues.append(outValues_h)
        self._dataContainer['inputs'] = {}  #XXX these are indexed by 1,2,...
        self._dataContainer['outputs'] = {}  #XXX these are indexed by 1,2,...
        for i in range(len(inpValues)):
            mainKey = i + 1
            subInput = {}
            subOutput = {}
            for key, value in zip(inpKeys, inpValues[i]):
                if key in self.getParaKeys('inputs'):
                    subInput[key] = c1darray(values=np.array([value]))
            for key, value in zip(outKeys[i], outValues[i]):
                if key in self.getParaKeys('outputs'):
                    subOutput[key] = c1darray(values=np.array(value))
            self._dataContainer['inputs'][mainKey] = subInput
            self._dataContainer['outputs'][mainKey] = subOutput
        #extend the expected size of this point set
        self.numAdditionalLoadPoints = len(allLines)  #used in checkConsistency

        self.checkConsistency()