def _compareLogPreMsg(self, other, lower=None, upper=None, sigma=None,
                      quantity=None):
    """Log an INFO message about this specific comparison."""
    header = "Comparing {}> against < with the following tolerances:"
    prefix = (quantity + ' from ') if quantity else ''
    lines = [header.format(prefix)]
    # Identify which object is which in the log output
    lines.append("{} {}".format('>', self))
    lines.append("{} {}".format('<', other))
    if lower is not None:
        lines.append("{} tolerance: {:5.3F} [%]".format('Lower', lower))
    if upper is not None:
        lines.append("{} tolerance: {:5.3F} [%]".format('Upper', upper))
    if sigma is not None:
        template = ("Confidence interval for statistical values: {:d} "
                    "sigma or {} %")
        # Known confidence levels; anything wider is reported as >= 99.7
        levels = {1: 68, 2: 95}
        pct = levels.get(sigma, '>= 99.7') if sigma else 0
        lines.append(template.format(sigma, pct))
    info('\n\t'.join(lines))
def loadYaml(self, filePath, strict=True):
    """
    Update the settings based on the contents of the yaml file

    .. versionadded:: 0.2.0

    Parameters
    ----------
    filePath: str, or FileType
        Path to config file
    strict: bool
        Fail at the first incorrect setting. If false, failed settings
        will not be loaded and alerts will be raised

    Raises
    ------
    KeyError or TypeError
        If settings found in the config file are not valid
    FileNotFound or OSError
        If the file does not exist
    """
    messages.debug('Attempting to read from {}'.format(filePath))
    with open(filePath) as stream:
        loaded = safe_load(stream)
    messages.debug(
        'Loading settings onto object with strict:{}'.format(strict))
    for name, entry in iteritems(loaded):
        # Nested mappings are walked recursively; scalars load directly
        if isinstance(entry, dict):
            self.__recursiveLoad(entry, strict, name)
        else:
            self.__safeLoad(name, entry, strict)
    messages.info('Done')
def read(self):
    """Read the branching file and store the coefficients."""
    info('Preparing to read {}'.format(self.filePath))
    with open(self.filePath) as stream:
        self.__fileObj = stream
        # _processBranchBlock is expected to clear __fileObj at EOF,
        # which terminates this loop
        while self.__fileObj is not None:
            self._processBranchBlock()
    info('Done reading branching file')
def read(self):
    """The main method for reading that not only parses data, but also
    runs pre and post checks.
    """
    info("Reading {}".format(self.filePath))
    # Validate before parsing, parse, then validate the parsed result
    self._precheck()
    self._read()
    info(" - done")
    self._postcheck()
def read(self):
    """Read all the files and create parser objects"""
    self._readAll()
    # Pre-check is opt-out through the sampler settings
    if self.settings['skipPrecheck']:
        debug('Skipping pre-check')
    else:
        self._precheck()
    self.process()
    if self.settings['freeAll']:
        info("Removing all parsers and containers from memory since "
             "setting <sampler.freeAll> is ``True``")
        self.free()
def _read(self):
    """Read through the xsplot file and store requested data.

    Dispatches each chunk from the keyword parser based on the chunk
    header: energy grid and majorant go to ``self.metadata``, per-name
    mt/xs/nu listings populate ``self.xsections``.
    """
    info('Preparing to read {}'.format(self.filePath))
    # Raw strings: these keys are treated as regular expressions by the
    # parser, so \d and \w must not be interpreted as string escapes
    keys = ['E', r'i\d{4,5}', r'm\w']
    separators = ['\n', '];', '\r\n']
    with KeywordParser(self.filePath, keys, separators) as parser:
        for chunk in parser.yieldChunks():
            if chunk[0][:5] == 'E = [':
                # The energy grid
                self.metadata['egrid'] = np.array(chunk[1:],
                                                  dtype=np.float64)
            elif chunk[0][:15] == 'majorant_xs = [':
                # L-inf norm on all XS on all materials
                self.metadata['majorant_xs'] = np.array(chunk[1:],
                                                        dtype=np.float64)
            elif chunk[0][-7:] == 'mt = [\n':
                debug('found mt specification')
                xsname = chunk[0][:-8]
                # Names starting with 'i' denote isotopic data
                isiso = chunk[0][0] == 'i'
                self.xsections[xsname] = XSData(xsname, self.metadata,
                                                isIso=isiso)
                self.xsections[xsname].setMTs(chunk)
            elif chunk[0][-7:] == 'xs = [\n':
                debug('found xs specification')
                xsname = chunk[0][:-8]
                self.xsections[xsname].setData(chunk)
            elif chunk[0][-7:] == 'nu = [\n':
                debug('found nu specification')
                xsname = chunk[0][:-8]
                self.xsections[xsname].setNuData(chunk)
            elif 'bra_f' in chunk[0]:
                warning("There is this weird 'bra_f' XS. these seem to be"
                        " constant. recording to metadata instead.")
                # NOTE(review): this relies on ``xsname`` assigned by a
                # previous chunk and raises NameError if a 'bra_f' entry
                # appears first -- TODO confirm the intended metadata key
                self.metadata[xsname].setData(chunk)
            else:
                # Removed stray debugging ``print(chunk)``; the error
                # message already identifies the offending entry
                error('Unidentifiable entry {}'.format(chunk[0]))
    info('Done reading xsplot file')
    debug(' found {} xs listings'.format(len(self.xsections)))
def _cleanData(self, name, value):
    """
    Return the new value to be stored after some cleaning.

    Reshapes scattering matrices if number of groups is known and
    ``xs.reshapeScatter``
    """
    # Only scatter matrices are candidates for reshaping
    if not (self.__reshaped and name in SCATTER_MATS):
        return value
    groups = self.numGroups
    if groups is None:
        info("Number of groups is unknown at this time. "
             "Will not reshape variable {}".format(name))
        return value
    return value.reshape(groups, groups, order="F")
def read(self):
    """Read through the depletion file and store requested data."""
    messages.info('Preparing to read {}'.format(self.filePath))
    keys = ['MAT']
    if self.settings['processTotal']:
        keys.append('TOT')
    keys.extend(self.settings['metadataKeys'])
    separators = ['\n', '];', '\r\n']
    with KeywordParser(self.filePath, keys, separators) as parser:
        for chunk in parser.yieldChunks():
            # Dispatch on the chunk header keyword
            if 'MAT' in chunk[0]:
                self._addMaterial(chunk)
            elif 'TOT' in chunk[0]:
                self._addTotal(chunk)
            else:
                self._addMetadata(chunk)
    if 'days' in self.metadata:
        # Propagate the burnup schedule onto every material
        days = self.metadata['days']
        for material in self.materials.values():
            material.days = days
    messages.info('Done reading depletion file')
    messages.debug(' found {} materials'.format(len(self.materials)))
def _cleanData(self, name, value):
    """
    Return the new value to be stored after some cleaning.

    Makes sure all vectors, everything but keff/kinf data, are
    converted to numpy arrays. Reshapes scattering matrices
    if number of groups is known and ``xs.reshapeScatter``
    """
    value = value if isinstance(value, ndarray) else array(value)
    if CRIT_RE.search(name):
        # Criticality data keep only the leading entry
        return value[0]
    # Only scatter matrices are candidates for reshaping
    if not (self.__reshaped and name in SCATTER_MATS):
        return value
    groups = self.numGroups
    if groups is None:
        info("Number of groups is unknown at this time. "
             "Will not reshape variable {}".format(name))
        return value
    return value.reshape(groups, groups, order="F")
def _toMatlab(args):
    """
    Write contents of a file to matlab.

    Return codes:
        0: all good
        1: need scipy
        3: conversion for file type not supported yet
    """
    source = args.file
    destination = args.output
    if not destination:
        # Default: same base name, .mat extension
        destination = splitext(source)[0] + '.mat'
    # inferReader returns the class, but we need an instance
    reader = inferReader(source)(source)
    try:
        converter = MatlabConverter(reader, destination)
    except ImportError:
        error("scipy >= 1.0 required to convert to matlab")
        return 1
    except NotImplementedError:
        error("Conversion not supported for {} reader at this time. ".format(
            reader.__class__.__name__))
        error("Please alert the developers of your need.")
        return 3
    reader.read()
    converter.convert(True, append=args.append, format=args.format,
                      longNames=args.longNames, compress=not args.large,
                      oned=args.oned)
    if not args.q:
        # Verbose prints a full message; otherwise just the output path
        if args.v:
            info("Wrote contents of {} to {}".format(source, destination))
        else:
            print(destination)
    return 0
def compareMetadata(self, other, header=False):
    """
    Return True if the metadata (settings) are identical.

    Parameters
    ----------
    other: :class:`ResultsReader`
        Class against which to compare
    {header}

    Returns
    -------
    bool:
        If the metadata are identical

    Raises
    ------
    {compTypeErr}
    """
    self._checkCompareObj(other)
    if header:
        self._compareLogPreMsg(other, quantity='metadata')
    mine = set(self.metadata.keys())
    theirs = set(other.metadata.keys())
    # Equal key sets means no symmetric difference
    similar = mine == theirs
    commonKeys = getCommonKeys(mine, theirs, 'metadata')
    skips = commonKeys.intersection(self.__METADATA_COMP_SKIPS)
    if any(skips):
        info("The following items will be skipped in the comparison\n\t{}".
             format(', '.join(sorted(skips))))
    for key in sorted(commonKeys):
        if key in skips:
            continue
        similar &= logDirectCompare(
            self.metadata[key], other.metadata[key], 0., 0., key)
    return similar
def inferReader(filePath):
    """
    Attempt to infer the correct reader type.

    Parameters
    ----------
    filePath: str
        File to be read.

    Returns
    -------
    Reader class whose registered pattern matches the entire file path.

    Raises
    ------
    SerpentToolsException
        If a reader cannot be inferred
    """
    for reg, reader in six.iteritems(REGEXES):
        match = re.match(reg, filePath)
        # Require the pattern to consume the entire path, not a prefix
        if match and match.group() == filePath:
            info('Inferred reader for {}: {}'
                 .format(filePath, reader.__name__))
            return reader
    # Fixed: adjacent string literals lacked a separating space, which
    # produced "...reader fromfile path..." in the error message
    raise SerpentToolsException(
        'Failed to infer filetype and thus accurate reader from '
        'file path {}'.format(filePath))
def seedFiles(inputFile, numSeeds, seed=None, outputDir=None, link=False,
              length=10):
    """
    Copy input file multiple times with unique seeds.

    Parameters
    ----------
    inputFile: str
        Path to input file
    numSeeds: int
        Number of files to create
    seed: int
        Optional argument to set the seed of the builtin random
        number generator
    outputDir: str
        Path to desired output directory. Files will be copied here.
        If the folder does not exist, try to make the directory.
        Assumes path relative to directory that contains the input file
    link: bool
        If True, do not copy the full file. Instead, create a new file
        with ``'include <inputFile>'`` and the new seed declaration.
    length: int
        Number of digits for random seeds

    Returns
    -------
    list:
        List of the names of all files created

    Raises
    ------
    OSError:
        If the requested input file could not be found
    ValueError:
        If the number of requested seeds is not a positive integer,
        nor can be converted to one, or if the length of the random
        seeds cannot be converted to a positive integer.
    TypeError:
        Raised if the values passed to ``length`` or ``numSeeds``
        cannot be converted to integers with :func:`int`

    See Also
    --------
    :func:`generateSeed`
    :mod:`random`
    :func:`random.seed()`
    :func:`random.getrandbits()`
    """
    # expanduser is a no-op for paths without '~', so no guard is needed
    inputFile = os.path.expanduser(inputFile)
    # The existence check applies whether or not ``link`` is requested;
    # docstring updated accordingly
    if not path.exists(inputFile):
        raise OSError('Input file {} does not exist'.format(inputFile))
    if not isinstance(numSeeds, int):
        numSeeds = int(numSeeds)
    if numSeeds < 1:
        raise ValueError('Require positive number of files to create')
    # Number of random bits needed for seeds with ``length`` digits
    bits = _getBitsForLength(length)
    random.seed(seed)
    inputPath = path.abspath(path.join(os.getcwd(), inputFile))
    inputRoot = path.dirname(inputPath)
    if outputDir is not None:
        # Output directory is resolved relative to the input file
        fPrefix = path.abspath(path.join(inputRoot, outputDir))
        if not path.isdir(fPrefix):
            info('Creating directory at {}'.format(fPrefix))
            os.mkdir(fPrefix)
    else:
        fPrefix = inputRoot
    fileFmt = path.join(fPrefix, _makeFileFmt(inputFile))
    # Either write thin include-files or full copies with new seeds
    writeFunc = _include if link else _copy
    return writeFunc(inputPath, numSeeds, fileFmt, bits, length)
import os

# Absolute path to the installed package directory; used to locate data
# files shipped with the package
ROOT_DIR = os.path.dirname(__file__)

# Re-export the top-level read function for convenience
from serpentTools.parsers import read
from serpentTools import messages

# List TODOS/feature requests here for now
# Compatibility
# TODO: Test compatibility with earlier numpy releases
# Usage/scripting
# TODO: Update rc with dictionary
# TODO: Update rc with yaml file into dictionary
# TODO: Capture materials with underscores for depletion
# TODO: Find a way to capture some or all of log messages for testing

# Version string is produced by versioneer; drop the helper afterwards
# so it does not leak into the public namespace
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions

messages.info('Using version {}'.format(__version__))