def test_splitCopy(self):
    """Verify that a copy, not a view, is returned when copy=True"""
    viewVals, viewUncs = splitValsUncs(self.input)
    copyVals, copyUncs = splitValsUncs(self.input, copy=True)
    pairs = zip((viewVals, viewUncs), (copyVals, copyUncs),
                ('value', 'uncertainty'))
    for viewed, copied, label in pairs:
        # contents must agree, but the objects must be distinct
        assert_array_equal(viewed, copied, err_msg=label)
        self.assertFalse(viewed is copied, msg=label)
def compareResults(self, other, lower=DEF_COMP_LOWER, upper=DEF_COMP_UPPER,
                   sigma=DEF_COMP_SIGMA, header=False):
    """
    Compare the contents of the results dictionary

    Parameters
    ----------
    other: :class:`ResultsReader`
        Class against which to compare
    {compLimits}
    {sigma}
    {header}

    Returns
    -------
    bool:
        If the results data agree to given tolerances

    Raises
    ------
    {compTypeErr}
    """
    self._checkCompareObj(other)
    if header:
        self._compareLogPreMsg(other, lower, upper, sigma, 'results')
    mineAll = self.resdata
    theirsAll = other.resdata
    sharedKeys = getKeyMatchingShapes(mineAll, theirsAll, 'results')
    # only fully similar when every key on both sides was matched
    allAgree = len(sharedKeys) == len(mineAll) == len(theirsAll)
    for key in sorted(sharedKeys):
        mine = mineAll[key]
        theirs = theirsAll[key]
        if key in RES_DATA_NO_UNCS:
            # quantities stored without uncertainties: direct comparison
            allAgree &= logDirectCompare(mine, theirs, lower, upper, key)
        else:
            myVals, myUncs = splitValsUncs(mine)
            theirVals, theirUncs = splitValsUncs(theirs)
            allAgree &= getLogOverlaps(key, myVals, theirVals, myUncs,
                                       theirUncs, sigma, relative=True)
    return allAgree
def test_splitVals(self):
    """Verify the basic functionality."""
    # even indices are values, odd indices are uncertainties
    vals, uncs = splitValsUncs(self.input)
    assert_array_equal(array([0, 2]), vals, err_msg="Values")
    assert_array_equal(array([1, 3]), uncs, err_msg="Uncertainties")
def test_splitAtCols(self):
    """Verify that the splitValsUncs works for 2D arrays."""
    matrix = self.input.reshape(2, 2)
    # column 0 holds values, column 1 holds uncertainties
    vals, uncs = splitValsUncs(matrix)
    assert_array_equal(array([[0], [2]]), vals, err_msg="Values")
    assert_array_equal(array([[1], [3]]), uncs, err_msg="Uncertainties")
def _storeFluxRatio(self, chunk):
    """Store flux ratios and their uncertainties for one universe."""
    firstLine = chunk[0]
    varName = FIRST_WORD_REGEX.search(firstLine).group()
    # the universe id is the trailing token of the variable name
    univ = varName.split('_')[-1]
    start, stop = VEC_REGEX.search(firstLine).span()
    # slice off the surrounding bracket characters before parsing
    vector = str2vec(firstLine[start + 1:stop - 2])
    self.fluxRatio[univ], self.fluxUnc[univ] = splitValsUncs(vector)
def varTypeFactory(key):
    """Return a callable converting raw data for ``key`` to (value, unc)."""
    # strings and other non-converted quantities pass through untouched
    if key in VAR_NO_CONV:
        return lambda x: (x, None)
    # scalar conversions carry no uncertainty
    for typeFunc, keys in DF_CONV.items():
        if key in keys:
            return lambda x: (typeFunc(x), None)
    # universe keys become tuples of floats
    if UNIV_K_RE.search(key) is not None:
        return lambda x: str2vec(x, out=tuple)
    # default: array conversion split into values and uncertainties
    return lambda x: splitValsUncs(x)
def _storeUnivData(self, varNameSer, varVals):
    """Process universes' data"""
    brState = self._getBUstate()  # the branching tuple for this state
    values = str2vec(varVals)  # convert the string to float numbers
    if brState not in self.universes:
        self.universes[brState] = HomogUniv(
            brState[0], brState[1], brState[2], brState[3])
    if varNameSer == self._keysVersion['univ']:
        return
    universe = self.universes[brState]
    if varNameSer in self._keysVersion['varsUnc']:
        # quantity carries no uncertainties: store values only
        universe.addData(varNameSer, array(values), False)
    else:
        vals, uncs = splitValsUncs(values)
        universe.addData(varNameSer, uncs, True)
        universe.addData(varNameSer, vals, False)
def _storeMicroXS(self, chunk):
    """Store micro cross-section and uncertainty values for one universe."""
    xsVals, xsUncs = {}, {}
    varName = FIRST_WORD_REGEX.search(chunk[0]).group()
    # the universe id is the trailing token of the variable name
    univ = varName.split('_')[-1]
    for line in chunk:
        # skip bracket-only delimiter lines
        if '[' in line or ']' in line:
            continue
        # strip trailing comments
        if '%' in line:
            line = line[:line.index('%')]
        # data rows carry more than three columns
        if len(line.split()) <= 3:
            continue
        values = str2vec(line)
        # (isotope, reaction type, isomeric state) identifies the reaction
        rxnData = (int(values[0]), int(values[1]), int(values[2]))
        xsVals[rxnData], xsUncs[rxnData] = splitValsUncs(values[3:])
    self.xsVal[univ] = xsVals
    self.xsUnc[univ] = xsUncs
def _processBranchUniverses(self, branch, burnup, burnupIndex):
    """Add universe data to this branch at this burnup."""
    unvID, numVariables = (int(xx) for xx in self._advance())
    univ = branch.addUniverse(unvID, burnup, burnupIndex - 1)
    for step in range(numVariables):
        atEnd = step == numVariables - 1
        splitList = self._advance(possibleEndOfFile=atEnd)
        varName = splitList[0]
        varValues = [float(xx) for xx in splitList[2:]]
        if not varValues:
            debug("No data present for variable {}. Skipping"
                  .format(varName))
            continue
        if not self._checkAddVariable(varName):
            continue
        if self._hasUncs:
            # expected values and uncertainties are interleaved
            vals, uncs = splitValsUncs(varValues)
            univ.addData(varName, array(vals), uncertainty=False)
            univ.addData(varName, array(uncs), uncertainty=True)
        else:
            univ.addData(varName, array(varValues), uncertainty=False)
def _processBranchUniverses(self, branch, burnup, burnupIndex):
    """Add universe data to this branch at this burnup."""
    unvID, numVariables = self._advance()
    numVariables = int(numVariables)
    # negative burnup values encode burnup given in days
    key = (UnivTuple(unvID, None, burnupIndex - 1, -burnup) if burnup < 0
           else UnivTuple(unvID, burnup, burnupIndex - 1, None))
    univ = HomogUniv(*key)
    branch[key] = univ
    for step in range(numVariables):
        atEnd = step == numVariables - 1
        splitList = self._advance(possibleEndOfFile=atEnd)
        varName = splitList[0]
        varValues = [float(xx) for xx in splitList[2:]]
        if not varValues:
            continue
        if not self._checkAddVariable(varName):
            continue
        if self._hasUncs:
            # expected values and uncertainties are interleaved
            vals, uncs = splitValsUncs(varValues)
            univ.addData(varName, array(vals), uncertainty=False)
            univ.addData(varName, array(uncs), uncertainty=True)
        else:
            univ.addData(varName, array(varValues), uncertainty=False)