def _storeFissionYields(self, chunk):
    """Store fission yield data from a single variable chunk.

    Parameters
    ----------
    chunk : list of str
        Lines of the output file for one fission yield variable,
        beginning with e.g. ``NFY_902270_1`` or ``NFY_902270_1E``

    Notes
    -----
    An ``...E`` variable only carries the incident energy; it is stored
    on the reader (``self._energyFY``) and no yields are parsed.
    Otherwise each three-column data line is split into fission product
    ID, independent yield, and cumulative yield arrays keyed by
    ``(parent ZAI, energy)`` in ``self.nfy``.
    """
    fissProd, indYield, cumYield = [], [], []
    currVar = FIRST_WORD_REGEX.search(chunk[0]).group()
    # Obtain the parent ID, e.g. NFY_902270_1 -> 902270
    parentFY = int(str2vec(currVar.split('_')[-2]))
    if 'E' in currVar.split('_')[-1]:  # e.g., NFY_902270_1E
        sclVal = SCALAR_REGEX.search(chunk[0])
        # energy must be stored on the reader for the yield chunks
        # that follow (thermal/epi/fast)
        self._energyFY = float(
            str2vec(chunk[0][sclVal.span()[0] + 1:sclVal.span()[1] - 2]))
        return
    for tline in chunk:
        if '[' in tline or ']' in tline:
            continue
        # Fix: strip the trailing comment only when '%' is present.
        # Previously ``tline[:tline.find('%')]`` was applied
        # unconditionally, so a comment-free line lost its last
        # character (find returns -1). Matches _storeMicroXS.
        if '%' in tline:
            tline = tline[:tline.index('%')]
        if len(tline.split()) == 3:
            val1, val2, val3 = str2vec(tline, out=list)
            fissProd.append(val1)
            indYield.append(val2)
            cumYield.append(val3)
    self.nfy[(parentFY, self._energyFY)] = {
        'fissProd': array(fissProd),
        'indYield': array(indYield),
        'cumYield': array(cumYield),
    }
def _storeMetaData(self, varNamePy, varType, varVals):
    """Store general descriptive data on the metadata dictionary.

    String values are stored as-is; vector or scalar values are
    converted to a numpy array first. Existing entries are overwritten.
    """
    if varType == 'string':
        self.metadata[varNamePy] = varVals
        return
    # vector or scalar: convert the raw string to numeric values
    self.metadata[varNamePy] = array(str2vec(varVals))
def _addMetadata(self, chunk):
    """Scan a chunk for a known metadata key and store its values.

    ``ZAI`` and ``NAMES`` span multiple lines; anything else is a
    single (possibly wrapped) line of numeric data. A warning is
    issued if no known key matches.
    """
    for varName in METADATA_KEYS:
        if varName not in chunk[0]:
            continue
        if varName in ('ZAI', 'NAMES'):
            stripped = [row.strip() for row in chunk[1:]]
            if varName == 'NAMES':
                # drop the leading quote and trailing padding
                values = [row[1:row.find(" ")] for row in stripped]
            else:
                values = str2vec(stripped, int, list)
        else:
            values = str2vec(self._cleanSingleLine(chunk))
        self.metadata[convertVariableName(varName)] = values
        return
    warning("Unsure about how to process metadata chunk {}"
            .format(chunk[0]))
def _storeFluxRatio(self, chunk):
    """Store group flux ratios and uncertainties for one universe."""
    firstLine = chunk[0]
    varName = FIRST_WORD_REGEX.search(firstLine).group()
    # universe id is the trailing token of the variable name
    univ = varName.split('_')[-1]
    match = VEC_REGEX.search(firstLine)
    start, stop = match.span()
    # strip the surrounding brackets before conversion
    vals = str2vec(firstLine[start + 1:stop - 2])
    self.fluxRatio[univ], self.fluxUnc[univ] = splitValsUncs(vals)
def _addMetadata(self, chunk):
    """Scan a chunk for a known metadata key and set the matching attribute.

    ``ZAI`` and ``NAMES`` span multiple lines; anything else is a
    single (possibly wrapped) line of numeric data.

    Raises
    ------
    ValueError
        If no known metadata key appears in the chunk header
    """
    header = chunk[0]
    for varName, destination in METADATA_KEYS.items():
        if varName not in header:
            continue
        if varName not in ('ZAI', 'NAMES'):
            values = str2vec(self._cleanSingleLine(chunk))
        else:
            trimmed = [row.strip() for row in chunk[1:]]
            if varName == 'NAMES':
                # drop the leading quote and trailing padding
                values = [row[1:row.find(" ")] for row in trimmed]
            else:
                values = str2vec(trimmed, int, list)
        setattr(self, destination, values)
        break
    else:
        raise ValueError("Unsure about how to process metadata chunk "
                         "{}".format(chunk[0]))
def varTypeFactory(key):
    """Return a converter callable for the variable named by ``key``.

    The returned callable maps a raw value to a
    ``(values, uncertainties)`` pair, where uncertainties are ``None``
    for non-array quantities.
    """
    # strings pass through untouched
    if key in VAR_NO_CONV:
        return lambda x: (x, None)
    # simple scalar conversions (int, float, ...)
    for typeFunc, keys in DF_CONV.items():
        if key in keys:
            return lambda x: (typeFunc(x), None)
    # array conversions: universe-keff style data has no uncertainties
    if UNIV_K_RE.search(key) is not None:
        return lambda x: str2vec(x, out=tuple)
    return lambda x: splitValsUncs(x)
def _storeResData(self, varNamePy, varVals):
    """Process time-dependent results data.

    Converts the raw value string to floats and appends it to the
    temporary array for this variable, creating the entry on first
    sight and appending at most once per result block.
    """
    vals = str2vec(varVals)  # string -> float values
    existing = self._tempArrays.get(varNamePy)
    if existing is None:
        # first occurrence of this variable
        self._tempArrays[varNamePy] = ListOfArrays(vals)
        return
    if len(existing) >= self._counter['rslt']:
        # already appended for this result block
        return
    try:
        existing.append(vals)
    except Exception as ee:
        raise SerpentToolsException(
            "Error in appending {} into {} of resdata:\n{}".format(
                varNamePy, vals, str(ee)))
def _storeUnivData(self, varNameSer, varVals):
    """Process data for the current universe/branch state.

    Creates the ``HomogUniv`` container for this branching state on
    first sight, then stores the variable on it — split into values
    and uncertainties unless the key is listed in
    ``self._keysVersion['varsUnc']``.
    """
    brState = self._getBUstate()  # branching tuple
    values = str2vec(varVals)  # string -> float values
    if brState not in self.universes:
        self.universes[brState] = HomogUniv(
            brState[0], brState[1], brState[2], brState[3])
    if varNameSer == self._keysVersion['univ']:
        return
    univ = self.universes[brState]
    if varNameSer in self._keysVersion['varsUnc']:
        univ.addData(varNameSer, array(values), False)
    else:
        vals, uncs = splitValsUncs(values)
        univ.addData(varNameSer, uncs, True)
        univ.addData(varNameSer, vals, False)
def __processEnergyChunk(self, chunk):
    """Extract energy grid or lethargy widths from a SENS chunk.

    Raises
    ------
    SerpentToolsException
        If no line in the chunk starts with ``SENS``
    """
    for line in chunk:
        if line[:4] == 'SENS':
            break
    else:
        raise SerpentToolsException("Could not find SENS parameter "
                                    "in energy chunk {}".format(chunk[:3]))
    tokens = line.split()
    nameParts = tokens[0].split('_')[1:]
    # numeric payload sits between the '=' token and the closing bracket
    values = str2vec(tokens[3:-1])
    if nameParts[0] == 'E':
        self.energies = values
    elif nameParts == ['LETHARGY', 'WIDTHS']:
        self.lethargyWidths = values
    else:
        warning("Unanticipated energy setting {}".format(tokens[0]))
def __processSensChunk(self, chunk):
    """Parse one chunk of sensitivity data.

    A header line starting with ``ADJ`` names the perturbed response
    (the tokens between ``PERT`` and ``SENS``) and flags
    energy-integrated data; the following data line is converted and
    handed to ``__addSens``. Blank lines, comments, and array
    terminators are skipped.
    """
    # Fix: the original initialized ``varName = None`` twice; the
    # duplicate dead assignment is removed.
    varName = None
    isEnergyIntegrated = False
    for line in chunk:
        if line == '\n' or '%' in line[:5] or '];' == line[:2]:
            continue
        if line[:3] == 'ADJ':
            fullVarName = line.split()[0]
            split = fullVarName.split('_')
            pertIndx = split.index('PERT')
            sensIndx = split.index('SENS')
            varName = '_'.join(split[pertIndx + 1:sensIndx])
            isEnergyIntegrated = split[-2:] == ['E', 'INT']
        elif varName is not None:
            self.__addSens(varName, str2vec(line), isEnergyIntegrated)
            varName = None  # consume the pending header
def _storeMicroXS(self, chunk):
    """Store micro cross-section and uncertainty values for a universe."""
    xsDict, uncDict = {}, {}
    leadWord = FIRST_WORD_REGEX.search(chunk[0]).group()
    # universe id is the trailing token of the variable name
    univ = leadWord.split('_')[-1]
    for row in chunk:
        if '[' in row or ']' in row:
            continue
        if '%' in row:
            # drop the trailing comment
            row = row[:row.index('%')]
        if len(row.split()) <= 3:
            continue
        values = str2vec(row)
        # key: (isotope, reaction type, isomeric state)
        key = (int(values[0]), int(values[1]), int(values[2]))
        xsDict[key], uncDict[key] = splitValsUncs(values[3:])
    self.xsVal[univ] = xsDict
    self.xsUnc[univ] = uncDict
def _storeResData(self, varNamePy, varVals):
    """Process time-dependent results data.

    Creates a new array for a first-seen variable, otherwise stacks
    the new values under the existing matrix — at most once per
    result block.
    """
    vals = str2vec(varVals)  # string -> float values
    if varNamePy not in self.resdata:
        self.resdata[varNamePy] = array(vals)  # define a new matrix
        return
    current = self.resdata[varNamePy]
    # rows already stored: a flat vector counts as a single row
    nRows = current.shape[0] if len(current.shape) == 2 else 1
    if nRows >= self._counter['rslt']:
        return  # this data was already appended
    try:
        self.resdata[varNamePy] = vstack([current, vals])
    except Exception as ee:
        raise SerpentToolsException(
            "Error in appending {} into {} of resdata:\n{}".
            format(varNamePy, vals, str(ee)))
def _processSensChunk(self, chunk):
    """Parse one chunk of sensitivity data.

    A header line starting with ``ADJ`` is decoded by
    ``_getAdjVarProps`` into the response name, energy-integration
    flag, and latent generation; the following data line is converted
    and handed to ``_addSens``. Blank lines, comments, and array
    terminators are skipped.

    Raises
    ------
    ValueError
        If a header line yields no response name
    """
    # Fix: the original initialized ``varName = None`` twice; the
    # duplicate dead assignment is removed. ``latentGen`` is also
    # initialized so the data branch never sees an unbound local.
    varName = None
    isEnergyIntegrated = False
    latentGen = None
    for line in chunk:
        if line == '\n' or '%' in line[:5] or '];' == line[:2]:
            continue
        if line[:3] == 'ADJ':
            fullVarName = line.split()[0]
            nameProps = self._getAdjVarProps(fullVarName.split("_"))
            varName = nameProps.get("name")
            if varName is None:
                raise ValueError(
                    "Cannot get response name from {}".format(fullVarName))
            isEnergyIntegrated = nameProps.get("energyFlag", False)
            latentGen = nameProps.get("latent")
        elif varName is not None:
            self._addSens(
                varName, str2vec(line), isEnergyIntegrated, latentGen)
            varName = None  # consume the pending header
def cleanDetChunk(chunk):
    """
    Return the name of the detector [grid] and the array of data.

    Parameters
    ----------
    chunk: list
        Chunk of text from the output file pertaining to this section.
        Should begin with ``DET<name>[<grid>] = [`` with array data
        on the subsequent lines

    Returns
    -------
    str:
        Name of the detector including grid characters
    numpy.ndarray:
        Array containing numeric data from the chunk

    Raises
    ------
    SerpentToolsException:
        If the name of the detector could not be determined
    """
    if not chunk[0].startswith('DET'):
        raise SerpentToolsException(
            "Could not determine name of detector from chunk: {}".format(
                chunk[0]))
    leader = chunk.pop(0)
    # name is everything after the DET prefix on the first token
    name = leader.split()[0][3:]
    if chunk[-1][:2] == '];':
        chunk.pop()
    nCols = len(chunk[0].split())
    data = empty((len(chunk), nCols), order='F')
    for rowIndx, line in enumerate(chunk):
        data[rowIndx] = str2vec(line)
    return name, data
def test_vecOfStr(self):
    """Verify a single word can be converted with str2vec"""
    word = 'ADF'
    actual = str2vec(word)
    assert_array_equal(array('ADF'), actual)