def __trainLocal__(self,featureVals,targetVals):
  """
    Because HDMR rom is a collection of sub-roms, we call sub-rom "train" to do what we need it do.
    Each subset-of-inputs ROM is trained on the training entries whose non-subset
    inputs sit at the reference (cut) point, then the expansion terms are collected
    and reduced for later evaluation.
    @ In, featureVals, np.array, training feature values
    @ In, targetVals, np.array, training target values
    @ Out, None
  """
  if not self.initialized:
    self.raiseAnError(RuntimeError,'ROM has not yet been initialized! Has the Sampler associated with this ROM been used?')
  # map each training point (tuple of feature values) -> its row of target values
  ft={}
  self.refSoln = {key:dict({}) for key in self.target}
  for i in range(len(featureVals)):
    ft[tuple(featureVals[i])]=targetVals[i,:]
  #get the reference case
  self.refpt = tuple(self.__fillPointWithRef((),[]))
  # record the solution at the reference point for each target
  # (overwrites the empty dicts set up above)
  for cnt, target in enumerate(self.target):
    self.refSoln[target] = ft[self.refpt][cnt]
  # train each subset ROM on its own sparse grid
  for combo,rom in self.ROMs.items():
    # sub-training dict: one list per target plus one list per input in this combo
    subtdict = {key:list([]) for key in self.target}
    for c in combo:
      subtdict[c]=[]
    SG = rom.sparseGrid
    # fvals: feature values per sparse-grid point; tvals: matching target values
    fvals=np.zeros([len(SG),len(combo)])
    tvals=np.zeros((len(SG),len(self.target)))
    for i in range(len(SG)):
      # complete the grid point by filling non-combo dimensions with reference values
      getpt=tuple(self.__fillPointWithRef(combo,SG[i][0]))
      #the 1e-10 is to be consistent with RAVEN's CSV print precision
      # locate the training entry matching this grid point within tolerance
      tvals[i,:] = ft[tuple(mathUtils.NDInArray(np.array(list(ft.keys())),getpt,tol=1e-10)[2])]
      for fp,fpt in enumerate(SG[i][0]):
        fvals[i][fp] = fpt
    # lay out the collected columns under their input/target names and train
    for i,c in enumerate(combo):
      subtdict[c] = fvals[:,i]
    for cnt, target in enumerate(self.target):
      subtdict[target] = tvals[:,cnt]
    rom.train(subtdict)
  #make ordered list of combos for use later
  # self.combos[l] holds all input subsets of size l
  maxLevel = max(list(len(combo) for combo in self.ROMs.keys()))
  self.combos = []
  for i in range(maxLevel+1):
    self.combos.append([])
  for combo in self.ROMs.keys():
    self.combos[len(combo)].append(combo)
  #list of term objects
  self.terms = {():[]}
  # each entry will look like 'x1,x2':('x1','x2'), missing the reference entry
  # built level by level so each subset's proper sub-subsets already exist
  for l in range(1,maxLevel+1):
    for romName in self.combos[l]:
      self.terms[romName] = []
      # add subroms -> NOTE(review): does this get reference case, too?
      for key in self.terms.keys():
        if set(key).issubset(set(romName)) and key!=romName:
          self.terms[romName].append(key)
  #reduce terms
  self.reducedTerms = {}
  for term in self.terms.keys():
    self._collectTerms(term,self.reducedTerms)
  #remove zero entries
  self._removeZeroTerms(self.reducedTerms)
  self.amITrained = True
# Exercise mathUtils.compareFloats at moderate, tiny, and huge magnitudes:
# a 1e-6 relative tolerance should accept the ~3e-7 relative difference,
# while 1e-8 should reject it, independent of the order of magnitude.
for oom, x, y in (('moderate', 3.141592,     3.141593),
                  ('small',    3.141592e-15, 3.141593e-15),
                  ('large',    3.141592e15,  3.141593e15)):
  checkTrue('compareFloats %s OoM match' % oom, mathUtils.compareFloats(x, y, tol=1e-6), True)
  checkTrue('compareFloats %s OoM mismatch' % oom, mathUtils.compareFloats(x, y, tol=1e-8), False)
### check "NDinArray"
points = np.array([(0.61259532, 0.27325707, 0.81182424),
                   (0.54608679, 0.82470626, 0.39170769)])
findSmall = (0.55, 0.82, 0.39)
findLarge = (0.61259532123, 0.27325707123, 0.81182423999)
# Three scenarios: (target point, tolerance, expected matching row or None).
# A loose tolerance matches the approximate point, a tighter one rejects it,
# and a near-exact point matches even at a very tight tolerance.
for goal, tolerance, hitIdx in ((findSmall, 1e-2, 1),
                                (findSmall, 1e-3, None),
                                (findLarge, 1e-8, 0)):
  found, idx, entry = mathUtils.NDInArray(points, goal, tol=tolerance)
  name = str(goal)
  if hitIdx is None:
    checkAnswer('NDInArray %s not found' % name, int(found), 0)
    checkType('NDInArray %s no idx' % name, idx, None)
    checkType('NDInArray %s no entry' % name, entry, None)
  else:
    checkAnswer('NDInArray %s found' % name, int(found), 1)
    checkAnswer('NDInArray %s idx' % name, idx, hitIdx)
    checkArray('NDInArray %s entry' % name, entry, points[hitIdx])
### check "normalizationFactors"
zeroList = [0] * 5
fourList = [4] * 5