Esempio n. 1
0
def getRelativeSortedListEntry(sortedList,value,tol=1e-15):
  """
    !!WARNING!! This method expects "sortedList" to already be a sorted list of float values!
    There are faster methods if they are not floats, and this will NOT work at all on unsorted lists.
    - Looks for a (close enough) match to "value" in "sortedList" using binary search.  If found,
    returns the index and value of the matching entry.  If not found, adds a new entry to the sortedList
    and returns the new index with the original value.
    It is recommended that this method be used to add ALL entries into the sorted list to keep it sorted.
    @ In, sortedList, list, list of __sorted__ float values
    @ In, value, float, value to search for match
    @ In, tol, float, optional, relative tolerance used by compareFloats to decide a match
    @ Out, sortedList, list, possibly modified but still ordered list of floats
    @ Out, match_index, int, index of match in sortedList
    @ Out, match, float, matching float
  """
  from utils.mathUtils import compareFloats #necessary to prevent errors at module load
  # bisect_left gives the insertion point that keeps the list sorted
  index = bisect.bisect_left(sortedList,value)
  match_index = None
  match = None
  if index == 0:
    # "value" sorts before (or equal to) everything currently in the list;
    # only the current first entry (if the list is nonempty) can match
    if sortedList and compareFloats(sortedList[0], value, tol=tol):
      match = sortedList[0]
      match_index = index
  elif index > len(sortedList)-1:
    # "value" sorts after everything in the list; only the current last entry can match
    if compareFloats(sortedList[-1], value, tol=tol):
      match = sortedList[-1]
      match_index = len(sortedList)-1
  else:
    # "value" falls between two existing entries; check both neighbors.
    # NOTE: deliberately no "break" here, so if BOTH neighbors are within
    # tolerance the right neighbor wins (preserves historical behavior).
    for idx in [index-1, index]:
      if compareFloats(sortedList[idx], value, tol=tol):
        match = sortedList[idx]
        match_index = idx
  # no close-enough entry found: insert "value" at its sorted position
  if match is None:
    sortedList.insert(index,value)
    match = value
    match_index = index
  return sortedList,match_index,match
Esempio n. 2
0
def compare(s1,s2,relTolerance = 1e-14):
  """
    Method aimed to compare two strings. This method tries to convert the 2
    strings into floats and uses a relative-tolerance comparison on them.
    In case the conversion is not possible (neither string, or only one of the
    strings, is convertible), the method compares the values as they are.
    @ In, s1, string, first string to be compared
    @ In, s2, string, second string to be compared
    @ In, relTolerance, float, optional, relative tolerance for the float comparison
    @ Out, response, bool, the boolean response (True if s1==s2, False otherwise)
  """
  w1, w2 = floatConversion(s1), floatConversion(s2)
  if   type(w1) == type(w2) and type(w1) != float:
    # neither value converted to float: fall back to plain comparison of the originals
    return s1 == s2
  elif type(w1) == type(w2) and type(w1) == float:
    from utils import mathUtils
    return mathUtils.compareFloats(w1,w2,relTolerance)
  elif type(w1) != type(w2) and type(w1) in [float,int] and type(w2) in [float,int]:
    # mixed int/float: promote both to float and recurse, forwarding the
    # caller-supplied tolerance (previously it was silently dropped and the
    # default 1e-14 was used regardless of what the caller asked for)
    w1, w2 = float(w1), float(w2)
    return compare(w1,w2,relTolerance)
  else:
    return (w1 == w2)
Esempio n. 3
0
  checkArray('distance %s' %str(f),dist,dists[i],1e-5)


### check "numpyNearestMatch"
findIn = np.array([(1,1,1),(2,2,2),(3,3,3)])
find =    [(0,0,0),(1,2,1),(1,2,2),(2,2,2),(10,10,10)]
idcs    = [   0   ,   0   ,   1   ,   1   ,    2     ]
correct = [(1,1,1),(1,1,1),(2,2,2),(2,2,2),( 3, 3, 3)]
for i,f in enumerate(find):
  idx,ary = mathUtils.numpyNearestMatch(findIn,f)
  # fixed label typo: 'numpyNearersMatch' -> 'numpyNearestMatch' (matches the tested method)
  checkAnswer('numpyNearestMatch %s' %str(f),idx,idcs[i],1e-5)
  checkArray('numpyNearestMatch %s' %str(f),ary,correct[i],1e-5)

### check float comparison
#moderate order of magnitude
checkTrue('compareFloats moderate OoM match',mathUtils.compareFloats(3.141592,3.141593,tol=1e-6),True)
checkTrue('compareFloats moderate OoM mismatch',mathUtils.compareFloats(3.141592,3.141593,tol=1e-8),False)
#small order of magnitude
checkTrue('compareFloats small OoM match',mathUtils.compareFloats(3.141592e-15,3.141593e-15,tol=1e-6),True)
checkTrue('compareFloats small OoM mismatch',mathUtils.compareFloats(3.141592e-15,3.141593e-15,tol=1e-8),False)
#large order of magnitude
checkTrue('compareFloats large OoM match',mathUtils.compareFloats(3.141592e15,3.141593e15,tol=1e-6),True)
checkTrue('compareFloats large OoM mismatch',mathUtils.compareFloats(3.141592e15,3.141593e15,tol=1e-8),False)


### check "NDinArray"
points = np.array([(0.61259532,0.27325707,0.81182424),
                   (0.54608679,0.82470626,0.39170769)])
findSmall = (0.55,0.82,0.39)
findLarge = (0.61259532123,0.27325707123,0.81182423999)
found,idx,entry = mathUtils.NDInArray(points,findSmall,tol=1e-2)
Esempio n. 4
0
    def finalizeCodeOutput(self, command, output, workingDir):
        """
          Called by RAVEN to modify output files (if needed) so that they are in a proper form.
          In this case, the default .mat output generated by Dymola is read and converted into a
          dictionary of trajectories, which is the format that RAVEN can communicate with.
          @ In, command, string, the command used to run the just ended job
          @ In, output, string, the Output name root
          @ In, workingDir, string, current working dir
          @ Out, response, dict, dictionary {variableName: trajectory column} built from the .mat data
        """
        # bookkeeping containers:
        #   _vars:   variable name -> (description, data block number, reduced column, sign)
        #   _blocks: distinct data block numbers encountered
        #   _namesData1/_timeSeriesData1: parameters (data block 1) and their values
        #   _namesData2/_timeSeriesData2: time-dependent variables (data block 2) and their values
        _vars = {}
        _blocks = []
        _namesData1 = []
        _namesData2 = []
        _timeSeriesData1 = []
        _timeSeriesData2 = []

        # Load the output file (.mat file) that has been generated by running the Dymola
        #   executable and store the data in this file to variable 'mat'.
        matSourceFileName = os.path.join(workingDir, output)
        matSourceFileName += '.mat'
        ###################################################################
        #FIXME: LOADMAT HAS A DIFFERENT BEHAVIOR IN SCIPY VERSION >= 0.18 #
        #if int(scipy.__version__.split(".")[1])>17:
        #  warnings.warn("SCIPY version >0.17.xx has a different behavior in reading .mat files!")
        # chars_as_strings=False keeps character matrices as char arrays so the
        # strMat* lambdas below can join them row-/column-wise themselves
        mat = scipy.io.loadmat(matSourceFileName, chars_as_strings=False)

        ###################################################################

        # Define the functions that extract strings from the matrix:
        #  - strMatNormal: for parallel string
        #  - strMatTrans:  for vertical string
        # These functions join the strings together, resulting in one string in each row, and remove
        #   trailing whitespace.
        strMatNormal = lambda a: [''.join(s).rstrip() for s in a]
        strMatTrans = lambda a: [''.join(s).rstrip() for s in zip(*a)]

        # Define the function that returns '1.0' with the sign of 'x'
        sign = lambda x: math.copysign(1.0, x)

        # Check the structure of the output file: 'Aclass' must be present.
        try:
            fileInfo = strMatNormal(mat['Aclass'])
        except KeyError:
            raise Exception('File structure not supported!')

        # Check the version of the output file (version 1.1).
        if fileInfo[1] == '1.1' and fileInfo[3] == 'binTrans':
            names = strMatTrans(mat['name'])  # names
            descr = strMatTrans(mat['description'])  # descriptions
            for i in range(len(names)):
                d = mat['dataInfo'][0][i]  # data block
                x = mat['dataInfo'][1][i]  # column (original)
                c = abs(x) - 1  # column (reduced)
                s = sign(x)  # sign
                # reduced column 0 marks the abscissa (the 'Time' variable);
                # every other column is a regular variable to be extracted
                if c:
                    _vars[names[i]] = (descr[i], d, c, float(s))
                    if not d in _blocks:
                        _blocks.append(d)
                else:
                    _absc = (names[i], descr[i])

            # Extract the trajectory for the variable 'Time' and store the data in the variable 'timeSteps'.
            timeSteps = mat['data_2'][0]

            # Compute the number of output points of trajectory (time series data).
            numOutputPts = timeSteps.shape[0]

            # Convert the variable type of 'timeSteps' from '1-d array' to '2-d array'.
            timeStepsArray = numpy.array([timeSteps])

            # Extract the names and output points of all variables and store them in the variables:
            #  - _namesData1: Names of parameters
            #  - _namesData2: Names of the variables that are not parameters
            #  - _timeSeriesData1: Trajectories (time series data) of '_namesData1'
            #  - _timeSeriesData2: Trajectories (time series data) of '_namesData2'
            for (k, v) in _vars.items():
                # honor an optional user-provided filter of variables to load
                readIt = True
                if len(self.variablesToLoad
                       ) > 0 and k not in self.variablesToLoad:
                    readIt = False
                if readIt:
                    dataValue = mat['data_%d' % (v[1])][v[2]]
                    # negative sign in 'dataInfo' means the stored column must be negated
                    if v[3] < 0:
                        dataValue = dataValue * -1.0
                    if v[1] == 1:
                        _namesData1.append(k)
                        _timeSeriesData1.append(dataValue)
                    elif v[1] == 2:
                        _namesData2.append(k)
                        _timeSeriesData2.append(dataValue)
                    else:
                        # only data blocks 1 (parameters) and 2 (trajectories) are supported
                        raise Exception('File structure not supported!')
            timeSeriesData1 = numpy.array(_timeSeriesData1)
            timeSeriesData2 = numpy.array(_timeSeriesData2)

            # The csv writer places quotes around variables that contain a ',' in the name, i.e.
            # a, "b,c", d would represent 3 variables 1) a 2) b,c 3) d. The csv reader in RAVEN does not
            # support this convention.
            # => replace ',' in variable names with '@', i.e.
            # a, "b,c", d will become a, b@c, d
            for mylist in [_namesData1, _namesData2]:
                for i in range(len(mylist)):
                    if ',' in mylist[i]:
                        mylist[i] = mylist[i].replace(',', '@')

            # Recombine the names of the variables and insert the variable 'Time'.
            # Order of the variable names should be 'Time', _namesData1, _namesData2.
            # Also, convert the type of the resulting variable from 'list' to '2-d array'.
            # NOTE(review): _absc is only assigned when an abscissa column (c == 0) was
            # seen above; presumably the Dymola format guarantees one — confirm.
            varNames = numpy.array([[_absc[0]] + _namesData1 + _namesData2])

            # Compute the number of parameters.
            sizeParams = timeSeriesData1.shape[0]

            # Create a 2-d array whose size is 'the number of parameters' by 'number of ouput points of the trajectories'.
            # Fill each row in a 2-d array with the parameter value (parameters are
            # constant, so only their first stored value is replicated across time).
            Data1Array = numpy.full((sizeParams, numOutputPts), 1.)
            for n in range(sizeParams):
                Data1Array[n, :] = timeSeriesData1[n, 0]

            # Create an array of trajectories, which are to be written to CSV file.
            # Rows become time points; columns are 'Time', then parameters, then variables.
            varTrajectories = numpy.matrix.transpose(
                numpy.concatenate(
                    (timeStepsArray, Data1Array, timeSeriesData2), axis=0))
            # create output response dictionary
            t = pd.Series(varTrajectories[:, 0])
            m = t.duplicated()
            if len(t[m]):
                # duplicated time values: make each repeated time unique by adding an
                # increasing multiple of machine epsilon (scaled by the value itself),
                # so downstream consumers see strictly distinct time points.
                tIndex = None
                iIndex = 1
                for i in range(len(t[m])):
                    index = t[m].index[i]
                    if tIndex is None:
                        tIndex = t[index]
                    else:
                        # same duplicated time as before -> bump the multiplier;
                        # a new duplicated time -> restart the multiplier at 1
                        if mathUtils.compareFloats(tIndex,
                                                   t[index],
                                                   tol=1.0E-15):
                            iIndex += 1
                        else:
                            iIndex = 1
                            tIndex = t[index]
                    t[index] = t[index] + numpy.finfo(
                        float).eps * t[index] * iIndex
                varTrajectories[:, 0] = t.to_numpy()
            response = {
                var: varTrajectories[:, i]
                for (i, var) in enumerate(varNames[0])
            }
        else:
            raise Exception('File structure not supported!')
        #release memory
        del _vars
        del _blocks
        del _namesData1
        del _namesData2
        del _timeSeriesData1
        del _timeSeriesData2
        del _absc
        del Data1Array
        del timeSeriesData1
        del timeSeriesData2
        return response
Esempio n. 5
0
# both are 0
checkAnswer('relativeDiff both are zero', mathUtils.relativeDiff(0, 0), 0.0)
# first is inf
checkAnswer('relativeDiff first is inf', mathUtils.relativeDiff(np.inf, 0),
            np.inf)
# second is inf
checkAnswer('relativeDiff second is inf', mathUtils.relativeDiff(0, np.inf),
            np.inf)
# both are inf
checkAnswer('relativeDiff both are inf',
            mathUtils.relativeDiff(np.inf, np.inf), 0)

### check float comparison
#moderate order of magnitude
checkTrue('compareFloats moderate OoM match',
          mathUtils.compareFloats(3.141592, 3.141593, tol=1e-6), True)
checkTrue('compareFloats moderate OoM mismatch',
          mathUtils.compareFloats(3.141592, 3.141593, tol=1e-8), False)
#small order of magnitude
checkTrue('compareFloats small OoM match',
          mathUtils.compareFloats(3.141592e-15, 3.141593e-15, tol=1e-6), True)
checkTrue('compareFloats small OoM mismatch',
          mathUtils.compareFloats(3.141592e-15, 3.141593e-15, tol=1e-8), False)
#large order of magnitude
checkTrue('compareFloats large OoM match',
          mathUtils.compareFloats(3.141592e15, 3.141593e15, tol=1e-6), True)
checkTrue('compareFloats large OoM mismatch',
          mathUtils.compareFloats(3.141592e15, 3.141593e15, tol=1e-8), False)

### check "NDinArray"
points = np.array([(0.61259532, 0.27325707, 0.81182424),