Example #1
 def plotConvertedBlock(self):
     """Render an image of the converted block."""
     figName = self._sourceBlock.name + "_1D_cylinder.svg"
     runLog.extra(
         "Plotting equivalent cylindrical block of {} as {}".format(
             self._sourceBlock, figName
         )
     )
     fig, ax = plt.subplots()
     fig.patch.set_visible(False)
     ax.patch.set_visible(False)
     ax.axis("off")
     patches = []
     colors = []
     for circleComp in self.convertedBlock:
         innerR, outerR = (
             circleComp.getDimension("id") / 2.0,
             circleComp.getDimension("od") / 2.0,
         )
         runLog.debug(
             "Plotting {:40s} with {:10.3f} {:10.3f} ".format(
                 circleComp, innerR, outerR
             )
         )
         circle = Wedge((0.0, 0.0), outerR, 0, 360.0, outerR - innerR)
         patches.append(circle)
         colors.append(circleComp.density())
     colorMap = matplotlib.cm
     p = PatchCollection(patches, alpha=1.0, linewidths=0.1, cmap=colorMap.rainbow)
     p.set_array(numpy.array(colors))
     p.set_clim(0, 20)
     ax.add_collection(p)
     ax.autoscale_view(True, True, True)
     plt.savefig(figName)
     return figName
Example #2
    def updateNuclideTemperatures(self, blockCollectionByXsGroup=None):
        """
        Recompute nuclide temperatures for the block collections within the core.

        Parameters
        ----------
        blockCollectionByXsGroup : dict, optional
            Mapping between the XS IDs in the core and the block collections. Note that providing this
            argument will only update the average temperatures of these XS IDs/block collections; average
            temperatures for any XS IDs not included will be discarded.

        Notes
        -----
        This method does not update any properties of the representative blocks.
        Temperatures are obtained from the BlockCollection class rather than the representative block.
        """
        self.avgNucTemperatures = {}
        blockCollectionsByXsGroup = (
            blockCollectionByXsGroup or self.makeCrossSectionGroups()
        )
        runLog.info(
            "Updating representative block average nuclide temperatures for the following XS IDs: {}".format(
                blockCollectionsByXsGroup.keys()
            )
        )
        for xsID, collection in blockCollectionsByXsGroup.items():
            collection.calcAvgNuclideTemperatures()
            self.avgNucTemperatures[xsID] = collection.avgNucTemperatures
            runLog.extra("XS ID: {}, Collection: {}".format(xsID, collection))
Example #3
 def plotConvertedBlock(self, fName=None):
     """Render an image of the converted block."""
     runLog.extra("Plotting equivalent cylindrical block of {}".format(
         self._sourceBlock))
     fig, ax = plt.subplots()
     fig.patch.set_visible(False)
     ax.patch.set_visible(False)
     ax.axis("off")
     patches = []
     colors = []
     for circleComp in self.convertedBlock:
         innerR, outerR = (
             circleComp.getDimension("id") / 2.0,
             circleComp.getDimension("od") / 2.0,
         )
         runLog.debug("Plotting {:40s} with {:10.3f} {:10.3f} ".format(
             circleComp, innerR, outerR))
         circle = Wedge((0.0, 0.0), outerR, 0, 360.0, outerR - innerR)
         patches.append(circle)
         colors.append(circleComp.density())
     colorMap = matplotlib.cm
     p = PatchCollection(patches,
                         alpha=1.0,
                         linewidths=0.1,
                         cmap=colorMap.YlGn)
     p.set_array(numpy.array(colors))
     ax.add_collection(p)
     ax.autoscale_view(True, True, True)
     ax.set_aspect("equal")
     fig.tight_layout()
     if fName:
         plt.savefig(fName)
     else:
         plt.show()
     return fName
Example #4
 def _summarizeGroups(self, blockCollectionsByXsGroup):
     """Summarize current contents of the XS groups."""
     runLog.extra("Cross section group manager summary")
     runLog.extra("Averaging performed by `{0}`".format(
         self.cs["xsBlockRepresentation"]))
     for xsID, blocks in blockCollectionsByXsGroup.items():
         if blocks:
             xsIDGroup = self._getXsIDGroup(xsID)
             if xsIDGroup == self._REPR_GROUP:
                 reprBlock = self.representativeBlocks.get(xsID)
                 lfps = reprBlock.getLumpedFissionProductCollection()
                 if lfps:
                     fissionGasRemoved = list(
                         lfps.values())[0].getGasRemovedFrac()
                 else:
                     fissionGasRemoved = 0.0
                 runLog.extra(
                     "XS ID {} contains {:4d} blocks, represented by: {:65s}"
                     " Fission Gas Removal Fraction: {:.2f}".format(
                         xsID, len(blocks), reprBlock, fissionGasRemoved))
             elif xsIDGroup == self._NON_REPR_GROUP:
                 runLog.extra(
                     "XS ID {} contains {:4d} blocks, but no representative block."
                     "".format(xsID, len(blocks)))
             elif xsIDGroup == self._PREGEN_GROUP:
                 xsFileNames = [
                     y for _x, y in self._getPregeneratedXsFileLocationData(
                         xsID)
                 ]
                 runLog.extra(
                     "XS ID {} contains {:4d} blocks, represented by: {}"
                     "".format(xsID, len(blocks), xsFileNames))
             else:
                 raise ValueError(
                     "No valid group for XS ID {}".format(xsID))
Example #5
def store(exePath, inputPaths, outputFiles, cacheDir):
    """
    Store output files in the cache.

    Notes
    -----
    Input paths need to be in the same order each time if the same cached folder is expected to be found.
    It is difficult to know which outputs a specific run will produce, so only
    outputs that do exist will be copied.
    This function should be supplied with a greedy list of outputs.
    """
    # outputFilePaths is a greedy list and they might not all be produced
    outputsThatExist = [
        outputFile for outputFile in outputFiles if os.path.exists(outputFile)
    ]

    folderLoc = _getCachedFolder(exePath, inputPaths, cacheDir)
    if os.path.exists(folderLoc):
        deleteCache(folderLoc)
    os.makedirs(folderLoc)
    _makeOutputManifest(outputsThatExist, folderLoc)

    for outputFile in outputsThatExist:
        baseName = os.path.basename(outputFile)
        cachedLoc = os.path.join(folderLoc, baseName)
        shutil.copy(outputFile, cachedLoc)
    runLog.extra("Added outputs for {} to the cache.".format(exePath))
Example #6
 def convert(self, r=None):
     """Create a copy of the reactor with a uniform axial mesh."""
     runLog.extra("Building copy of {} with a uniform axial mesh".format(r))
     self._sourceReactor = r
     self.convReactor = self.initNewReactor(r)
     self._computeAverageAxialMesh()
     self._convertNumberDensities()
     return self.convReactor
Example #7
 def _writeBlockParams(self, reactor, timeStep):
     """Writing block parameter info"""
     runLog.extra(
         "Writing at time step {} block parameter info for {}".format(
             timeStep, reactor))
     blocks = reactor.core.getBlocksByIndices(self._frozenBlockOrder)
     self._createParamDatasets("{}/blocks".format(timeStep), blocks)
Example #8
File: operator.py Project: guruprad/armi
    def _processInterfaceDependencies(self):
        """
        Check all interfaces' dependencies and add missing ones.

        Notes
        -----
        Order does not matter here because the interfaces added here are disabled and play a supporting
        role, so they are not intended to run on the interface stack. They will be called by other interfaces.

        As mentioned in :py:meth:`addInterface`, it may be better to just instantiate utility code
        when it's needed rather than rely on this system.
        """
        # Make multiple passes in case there's one added that depends on another.
        for _dependencyPass in range(5):
            numInterfaces = len(self.interfaces)
            # manipulation friendly, so it's ok to add additional things to the stack
            for i in self.getInterfaces():
                for dependency in i.getDependencies(self.cs):
                    name = dependency.name
                    function = dependency.function
                    klass = dependency

                    if not self.getInterface(name, function=function):
                        runLog.extra(
                            "Attaching {} interface (disabled, BOL forced) due to dependency in {}"
                            .format(klass.name, i.name))
                        self.addInterface(klass(r=self.r, cs=self.cs),
                                          enabled=False,
                                          bolForce=True)
            if len(self.interfaces) == numInterfaces:
                break
        else:
            raise RuntimeError(
                "Interface dependency resolution did not converge.")
Example #9
    def _readFluxes(self):
        """
        Read real and/or adjoint fluxes from binary interface files onto data model.

        The most straightforward place to get this is from
        the :py:mod:`armi.nuclearDataIO.cccc.rtflux` CCCC interface files, which DIF3D can be asked
        to write in all geometry options. RTFLUX has the data
        collected by i,j,k mesh index, and it is possible that each
        ARMI block covers more than one (i,j,k) mesh point. This is
        common, e.g. in finite difference cases.

        Notes
        -----
        This may need more conditionals to expand it to work with photon parameters
        in gamma transport cases.
        """
        solutionType = SolutionType.fromGlobalFluxOpts(self.opts)
        if solutionType in (
                SolutionType.REAL,
                SolutionType.REAL_AND_ADJOINT,
        ):
            runLog.extra("Reading real flux from RTFLUX")
            rtfluxData = rtflux.RtfluxStream.readBinary(rtflux.RTFLUX)
            self._applyFluxData(rtfluxData, "mgFlux", "flux")

        if solutionType in (
                SolutionType.ADJOINT,
                SolutionType.REAL_AND_ADJOINT,
        ):
            runLog.extra("Reading adjoint flux from ATFLUX")
            atfluxData = rtflux.AtfluxStream.readBinary(rtflux.ATFLUX)
            self._applyFluxData(atfluxData, "adjMgFlux", "fluxAdj")
Example #10
File: xtviewDB.py Project: wilcoxjd/armi
    def _updateBlockTypeFromDB(self, reactor, blockList, dbTimeStep):
        """Updates blocks in reactor if database says its a different type of block"""
        dataList = self.readBlockParam("type", dbTimeStep)

        for blockTypeInDB, b in zip(dataList, blockList):
            oldBType = b.getType()

            if blockTypeInDB != oldBType:
                a = b.parent
                bolAssem = reactor.blueprints.assemblies.get(a.getType(), None)
                if not bolAssem:
                    raise RuntimeError(
                        "No BOL assem of type {0} exists in the input.".format(
                            a.getType()
                        )
                    )
                newBlock = bolAssem.getFirstBlockByType(blockTypeInDB)
                if not newBlock:
                    raise RuntimeError(
                        "Could not find a {0} block in {1}. Not updating block type.".format(
                            blockTypeInDB, a
                        )
                    )
                else:
                    newBlock = copy.deepcopy(newBlock)
                    runLog.extra(
                        "Updating block {} with BOL block: {} because the block type "
                        "changed from {} to {}".format(
                            b, newBlock, oldBType, blockTypeInDB
                        )
                    )
                    b.replaceBlockWithBlock(newBlock)
Example #11
 def applyStateToOriginal(self):
     """
      Now that state is computed on the uniform mesh, map it back to the ARMI mesh.
     """
     runLog.extra(
         "Applying uniform neutronics mesh results on {0} to ARMI mesh on {1}"
         .format(self.convReactor, self._sourceReactor))
     self._clearStateOnReactor(self._sourceReactor)
     self._mapStateFromReactorToOther(self.convReactor, self._sourceReactor)
Example #12
 def _combineLastTwoRadialBins(self):
     if (self.radialMesh[-1] - self.radialMesh[-2]) == 1:
         runLog.extra(
             "Outermost ring of the core {} is not fully filled and will be homogenized with the "
             "previous ring {}".format(self.radialMesh[-1],
                                       self.radialMesh[-2]))
          # drop the shared boundary so the last two bins merge into one
          self.radialMesh.pop(-2)
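
A minimal sketch of what that merge does to a plain list of ring boundaries (the numbers are made up for illustration):

radialMesh = [3, 6, 9, 10]  # hypothetical; the last bin (9 -> 10) spans one ring
if (radialMesh[-1] - radialMesh[-2]) == 1:
    radialMesh.pop(-2)  # drop the boundary shared by the last two bins
assert radialMesh == [3, 6, 10]  # outer edge kept; bins merged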
Example #13
    def applyStateToOriginal(self):
        """
        Now that flux/power, etc. have been computed on the uniform mesh (the new reactor), map the
        results back to the ARMI mesh (the source reactor).

        This runs after the neutronics has been computed on the neutronics mesh.
        """
        runLog.extra(
            "Applying uniform neutronics mesh results on {0} to ARMI mesh on {1}"
            .format(self.convReactor, self._sourceReactor))
        self._setParamsToUpdate()
        self._clearStateOnSourceReactor()

        def paramSetter(armiObject, vals, paramNames):
            for paramName, val in zip(paramNames, vals):
                armiObject.p[paramName] = val

        def paramGetter(armiObject, paramNames):
            paramVals = []
            for paramName in paramNames:
                paramVals.append(armiObject.p[paramName])
            return numpy.array(paramVals)

        def fluxSetter(block, flux, _paramNames):
            block.p.mgFlux = list(flux)

        def fluxGetter(block, _paramNames):
            val = block.p.mgFlux
            if val is None or len(val) == 0:
                # so the merger can detect and just use incremental value.
                return None
            else:
                return numpy.array(val)

        def adjointFluxSetter(block, flux, _paramNames):
            block.p.adjMgFlux = list(flux)

        def adjointFluxGetter(block, _paramNames):
            val = block.p.adjMgFlux
            if not val:
                # so the merger can detect and just use incremental value.
                return None
            else:
                return numpy.array(val)

        for paramName in self.reactorParamNames:
            self._sourceReactor.core.p[paramName] = self.convReactor.core.p[
                paramName]

        for aUniform in self.convReactor.core:
            aReal = self._sourceReactor.core.getAssemblyByName(
                aUniform.getName())
            _setStateFromOverlaps(aUniform, aReal, paramSetter, paramGetter,
                                  self.blockParamNames)
            _setStateFromOverlaps(aUniform, aReal, fluxSetter, fluxGetter,
                                  ["mgFlux"])
            _setStateFromOverlaps(aUniform, aReal, adjointFluxSetter,
                                  adjointFluxGetter, ["adjMgFlux"])
Example #14
    def _mapStateFromReactorToOther(self, sourceReactor, destReactor):
        UniformMeshGeometryConverter._mapStateFromReactorToOther(
            self, sourceReactor, destReactor)

        def paramSetter(armiObject, vals, paramNames):
            for paramName, val in zip(paramNames, vals):
                armiObject.p[paramName] = val

        def paramGetter(armiObject, paramNames):
            paramVals = []
            for paramName in paramNames:
                paramVals.append(armiObject.p[paramName])
            return numpy.array(paramVals)

        def fluxSetter(block, flux, _paramNames):
            block.p.mgFlux = list(flux)

        def fluxGetter(block, _paramNames):
            val = block.p.mgFlux
            if val is None or len(val) == 0:
                # so the merger can detect and just use incremental value.
                return None
            else:
                return numpy.array(val)

        def adjointFluxSetter(block, flux, _paramNames):
            block.p.adjMgFlux = list(flux)

        def adjointFluxGetter(block, _paramNames):
            val = block.p.adjMgFlux
            if not val:
                # so the merger can detect and just use incremental value.
                return None
            else:
                return numpy.array(val)

        for paramName in self.reactorParamNames:
            destReactor.core.p[paramName] = sourceReactor.core.p[paramName]

        for aSource in sourceReactor.core:
            aDest = destReactor.core.getAssemblyByName(aSource.getName())
            _setStateFromOverlaps(aSource, aDest, fluxSetter, fluxGetter,
                                  ["mgFlux"])
            _setStateFromOverlaps(aSource, aDest, adjointFluxSetter,
                                  adjointFluxGetter, ["adjMgFlux"])
            _setStateFromOverlaps(aSource, aDest, paramSetter, paramGetter,
                                  self.blockParamNames)

        # Now recalculate derived params with the mapped flux to minimize
        # potential numerical diffusion (e.g. control rod tip into large coolant)
        if destReactor.core.lib is not None:
            runLog.extra(
                f"Computing block-level reaction rates for {destReactor.core}")
            # loop over every block in the core, not just the last-mapped assembly
            for b in destReactor.core.getBlocks():
                globalFluxInterface.calcReactionRates(b,
                                                      destReactor.core.p.keff,
                                                      destReactor.core.lib)
Example #15
 def _transferFiles(self, initialPath, destinationPath, fileList):
     if not fileList:
         return
     if not os.path.exists(destinationPath):
         os.mkdir(destinationPath)
     for ff in fileList:
         fromPath = os.path.join(initialPath, ff)
         toPath = os.path.join(destinationPath, ff)
         runLog.extra("Moving {} to {}".format(fromPath, toPath))
         shutil.move(fromPath, toPath)
Example #16
    def enableBuGroupUpdates(self):
        """
        Turn on updating bu groups based on burnup

        See Also
        --------
        disableBuGroupUpdates
        """
        runLog.extra("Burnup group updating enabled")
        self._buGroupUpdatesEnabled = True
Example #17
File: __init__.py Project: youngmit/armi
def convertDatabase(
    inputDBName: str,
    outputDBName: Optional[str] = None,
    outputVersion: Optional[str] = None,
):
    """
    Convert database files between different versions.

    Parameters
    ----------
    inputDBName
        name of the complete hierarchy database
    outputDBName
        name of the output database that should be consistent with XTView
    outputVersion
        version of the database to convert to. Defaults to latest version
    """
    dbIn = databaseFactory(inputDBName, permission=Permissions.READ_ONLY_FME)

    if dbIn.version == outputVersion:
        runLog.important(
            "The input database ({}) appears to already be in the desired "
            "format ({})".format(inputDBName, dbIn.version)
        )
        return

    outputDBName = outputDBName or "-converted".join(os.path.splitext(inputDBName))
    dbOut = databaseFactory(
        outputDBName, permission=Permissions.CREATE_FILE_TIE, version=outputVersion
    )
    # Each DB load resets the verbosity to that of the run. Here we allow
    # conversion users to override it.
    conversionVerbosity = runLog.getVerbosity()
    runLog.extra(f"Converting {dbIn} to DB version {outputVersion}")
    with dbIn, dbOut:
        # Making the bold assumption that we are working with HDF5
        h5In = _getH5File(dbIn)
        h5Out = _getH5File(dbOut)
        dbOut.writeInputsToDB(None, *dbIn.readInputsFromDB())

        for cycle, timeNode in dbIn.genTimeSteps():
            runLog.extra(f"Converting cycle={cycle}, timeNode={timeNode}")
            r = dbIn.load(cycle, timeNode)
            runLog.setVerbosity(conversionVerbosity)
            dbOut.writeToDB(r)

            for auxPath in dbIn.genAuxiliaryData((cycle, timeNode)):
            name = auxPath.split("/")[-1]
                auxOutPath = dbOut.getAuxiliaryDataPath((cycle, timeNode), name)
                runLog.important(
                    "Copying auxiliary data for time ({}, {}): {} -> {}".format(
                        cycle, timeNode, auxPath, auxOutPath
                    )
                )
                h5In.copy(auxPath, h5Out, name=auxOutPath)
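
A usage sketch; the file name and target version are hypothetical:

# Converts my-case.h5, writing my-case-converted.h5 at database version "3"
# (the default output name comes from the "-converted" suffix logic above).
convertDatabase("my-case.h5", outputVersion="3")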
Example #18
    def _readPeakFluxes(self):
        """Read peak fluxes from output."""
        if self.opts.adjoint and not self.opts.real:
            runLog.extra(
                "Skipping peak flux update due to purely adjoint case")
            return

        stdoutReader = Dif3dStdoutReader(self.opts.outputFile)
        peakFluxes = stdoutReader.readRegionTotals()
        for b in self.r.core.getBlocks():
            b.p.fluxPeak = peakFluxes[getDIF3DStyleLocatorLabel(b)]
Example #19
File: __init__.py Project: guruprad/armi
def copyWithoutBlocking(src, dest):
    """
    Copy a file in a separate thread to avoid blocking while IO completes.

    Useful for copying large files while ARMI moves along.
    """
    files = "{} to {}".format(src, dest)
    runLog.extra("Copying (without blocking) {}".format(files))
    t = threading.Thread(target=shutil.copy, args=(src, dest))
    t.start()
    return t
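
A usage sketch with hypothetical paths; joining the returned thread before relying on the destination file is the caller's responsibility:

t = copyWithoutBlocking("big-results.h5", "/archive/big-results.h5")
doOtherWork()  # hypothetical: ARMI moves along while the copy runs
t.join()       # wait for the copy before reading /archive/big-results.h5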
Example #20
def runActions(o, r, cs, actions, numPerNode=None, serial=False):
    """Run a series of MpiActions in parallel, or in series if :code:`serial=True`.

    Notes
    -----
    The number of actions DOES NOT need to match :code:`armi.MPI_SIZE`.

    Calling this method may invoke an MPI split, which will change MPI_SIZE for the duration of the action.
    This allows MPI operations to run without being blocked by tasks that are not doing the same thing.
    """
    if not armi.MPI_DISTRIBUTABLE or serial:
        return runActionsInSerial(o, r, cs, actions)

    useForComputation = [True] * armi.MPI_SIZE
    if numPerNode is not None:
        if numPerNode < 1:
            raise ValueError("numPerNode must be >= 1")
        numThisNode = {nodeName: 0 for nodeName in armi.MPI_NODENAMES}
        for rank, nodeName in enumerate(armi.MPI_NODENAMES):
            useForComputation[rank] = numThisNode[nodeName] < numPerNode
            numThisNode[nodeName] += 1
    numWorkers = len([use for use in useForComputation if use])
    numBatches = int(math.ceil(len(actions) / float(numWorkers)))
    runLog.extra("Running {} MPI actions in parallel over {} batches".format(
        len(actions), numBatches))

    queue = list(actions)  # create a new list; we will use it as a queue
    results = []
    batchNum = 0
    while queue:
        actionsThisRound = []
        for useRank in useForComputation:
            actionsThisRound.append(
                queue.pop(0) if useRank and queue else None)
        realActions = [(armi.MPI_NODENAMES[rank], rank, act)
                       for rank, act in enumerate(actionsThisRound)
                       if act is not None]
        batchNum += 1
        runLog.extra(
            "Distributing {} MPI actions for parallel processing (batch {} of {}):\n{}"
            .format(
                len(realActions),
                batchNum,
                numBatches,
                tabulate.tabulate(realActions,
                                  headers=["Nodename", "Rank", "Action"]),
            ))
        distrib = DistributionAction(actionsThisRound)
        distrib.broadcast()
        results.append(distrib.invoke(o, r, cs))
    return results
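
A small worked example of the batching math above, with hypothetical counts:

import math

actions = list(range(10))                     # stand-ins for 10 MpiActions
useForComputation = [True, True, True, True]  # 4 usable ranks
numWorkers = len([use for use in useForComputation if use])
numBatches = int(math.ceil(len(actions) / float(numWorkers)))
assert numBatches == 3                        # batches of 4, 4, and 2 actions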
Example #21
    def _transferFiles(initialPath, destinationPath, fileList):
        """
        Transfer files into or out of the directory.

        This is used in ``moveFiles`` and ``retrieveFiles`` to shuffle files about when
        creating a target directory or when coming back, respectively. Beware that this
        uses ``shutil.copy()`` under the hood, which doesn't play nicely with
        directories. Future revisions should improve this.

        Parameters
        ----------
        initialPath : str
            Path to the folder to find files in.
        destinationPath : str
            Path to the folder to move files to.
        fileList : list of str or list of tuple
            File names to move from initial to destination. If this is a
            simple list of strings, the files will be transferred. Alternatively
            tuples of (initialName, finalName) are allowed if you want the file
            renamed during transit. In the non-tuple option, globs/wildcards
            are allowed.

        .. warning:: On Windows the max number of characters in a path is 260.
            If you exceed this you will see FileNotFound errors here.

        """
        if not fileList:
            return
        if not os.path.exists(destinationPath):
            os.mkdir(destinationPath)
        for pattern in fileList:
            if isinstance(pattern, tuple):
                # allow renames in transit
                fromName, destName = pattern
                copies = [(fromName, destName)]
            else:
                # expand globs if they're given
                copies = []
                for ff in glob.glob(pattern):
                    # renaming not allowed with globs
                    copies.append((ff, ff))

            for fromName, destName in copies:
                fromPath = os.path.join(initialPath, fromName)
                if not os.path.exists(fromPath):
                    runLog.warning(
                        f"{fromPath} does not exist and will not be copied.")
                    continue

                toPath = os.path.join(destinationPath, destName)
                runLog.extra("Copying {} to {}".format(fromPath, toPath))
                shutil.copy(fromPath, toPath)
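
A usage sketch of the three ``fileList`` forms the docstring allows; the directory and file names are hypothetical:

_transferFiles(
    "runDir",
    "archiveDir",
    [
        "case.out",                         # plain name, copied as-is
        "*.restart",                        # glob, expanded; renaming not allowed
        ("case.inp", "case-archived.inp"),  # tuple: renamed in transit
    ],
)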
Example #22
 def convert(self, r=None):
     """Create a new reactor with a uniform mesh."""
     runLog.extra("Building copy of {} with a uniform axial mesh".format(r))
     self._sourceReactor = r
     self.convReactor = self.initNewReactor(r)
     self._setParamsToUpdate()
     self._computeAverageAxialMesh()
     self._buildAllUniformAssemblies()
     self._clearStateOnReactor(self.convReactor)
     self._mapStateFromReactorToOther(self._sourceReactor, self.convReactor)
     self.convReactor.core.updateAxialMesh()
     self._checkConversion()
     return self.convReactor
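
A sketch of the round trip these converter methods imply (see also examples #11 and #13); the converter instance and the neutronics call are hypothetical:

convReactor = converter.convert(r)  # build the uniform-axial-mesh copy
runNeutronics(convReactor)          # hypothetical flux solve on the uniform mesh
converter.applyStateToOriginal()    # map flux/power back onto r's native mesh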
Example #23
    def _copyPregeneratedXSFile(self, xsID):
        # avoid a race condition from every processor copying the same files: only rank 0 copies
        if armi.MPI_RANK != 0:
            return

        for xsFileLocation, xsFileName in self._getPregeneratedXsFileLocationData(xsID):
            dest = os.path.join(os.getcwd(), xsFileName)
            runLog.extra(
                "Copying pre-generated XS file {} from {} for XS ID {}".format(
                    xsFileName, os.path.dirname(xsFileLocation), xsID
                )
            )
            shutil.copy(xsFileLocation, dest)
Example #24
    def invoke(self):
        from armi import cases

        if not os.path.exists(self.args.reference):
            runLog.error(
                "Could not find reference directory {}".format(self.args.reference)
            )
            sys.exit(1)

        if not os.path.exists(self.args.comparison):
            runLog.error(
                "Could not find comparison directory {}".format(self.args.comparison)
            )
            sys.exit(1)

        refSuite = cases.CaseSuite(self.cs)

        # contains all tests that user had access to
        allTests = []
        for pat in self.args.patterns + self.args.additional_comparisons:
            name, ext = os.path.splitext(pat)
            allTests.append(pat)
            if ext == ".yaml":
                # auto-add XML variants of yaml settings
                # to accommodate comparisons against xml suites (e.g. testing)
                xmlName = name + ".xml"
                runLog.extra("Including {} in reference patterns.".format(xmlName))
                allTests.append(xmlName)
        refSuite.discover(
            rootDir=self.args.reference,
            patterns=allTests,
            ignorePatterns=self.args.ignore,
        )

        cmpSuite = cases.CaseSuite(self.cs)
        cmpSuite.discover(
            rootDir=self.args.comparison,
            patterns=self.args.patterns,
            ignorePatterns=self.args.ignore,
        )

        nIssues = refSuite.compare(
            cmpSuite,
            weights=self.args.weights,
            tolerance=self.args.tolerance,
            exclusion=self.args.exclude,
            timestepMatchup=self.args.timestepMatchup,
        )

        if nIssues > 0:
            sys.exit(nIssues)
Example #25
    def disableBuGroupUpdates(self):
        """
        Turn off updating bu groups based on burnup

        Useful during reactivity coefficient calculations to be consistent with ref. run.

        See Also
        --------
        enableBuGroupUpdates
        """
        runLog.extra("Burnup group updating disabled")
        wasEnabled = self._buGroupUpdatesEnabled
        self._buGroupUpdatesEnabled = False
        return wasEnabled
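
The returned ``wasEnabled`` flag supports a save/restore pattern like this sketch, where ``csm`` and the perturbation call are hypothetical:

wasEnabled = csm.disableBuGroupUpdates()
try:
    computeReactivityCoefficients()  # hypothetical perturbations consistent with the ref. run
finally:
    if wasEnabled:
        csm.enableBuGroupUpdates()  # restore the previous behavior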