Example #1
    def _checkBurnupThresholds(self, blockList):
        """
        Check whether burnup has changed meaningfully.

        If it has, the xs sets should be regenerated; otherwise, xs generation
        can be skipped.

        This is motivated by the idea that during very long explicit equilibrium runs,
        it might save time to turn off xs generation at a certain point.

        Parameters
        ----------
        blockList : iterable
            List of all blocks to examine

        Returns
        -------
        idsChangedBurnup : bool
            True if any XS ID is new or its burnup changed by more than the tolerance

        """
        idsChangedBurnup = True
        if self._burnupTolerance > 0:
            idsChangedBurnup = False
            for b in blockList:
                xsID = b.getMicroSuffix()

                if xsID not in self._oldXsIdsAndBurnup:
                    # A new ID was found that was not among the old IDs;
                    # the cross sections must be regenerated this time around
                    self._oldXsIdsAndBurnup[xsID] = b.p.percentBu
                    idsChangedBurnup = True
                else:
                    # The ID was found. Compare burnups to determine whether there
                    # has been meaningful change between the runs
                    buOld = self._oldXsIdsAndBurnup[xsID]
                    buNow = b.p.percentBu

                    if abs(buOld - buNow) > self._burnupTolerance:
                        idsChangedBurnup = True
                        # update the stored burnup to match the xs set about to be generated
                        self._oldXsIdsAndBurnup[xsID] = buNow

                        runLog.important(
                            "Burnup has changed in xsID {} from {} to {}. "
                            "Recalculating Cross-sections".format(xsID, buOld, buNow)
                        )

            if not idsChangedBurnup:
                SkippingXsGen_BuChangedLessThanTolerance(self._burnupTolerance)

        return idsChangedBurnup
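The tolerance bookkeeping above distills to a small standalone sketch. Everything here is hypothetical: burnupChanged and oldBurnups stand in for the method and self._oldXsIdsAndBurnup, and the sample burnups stand in for the blocks' b.p.percentBu values.

def burnupChanged(oldBurnups, newBurnups, tolerance):
    """Return True if any XS ID is new or moved more than `tolerance` in burnup."""
    changed = False
    for xsID, buNow in newBurnups.items():
        buOld = oldBurnups.get(xsID)
        if buOld is None or abs(buOld - buNow) > tolerance:
            oldBurnups[xsID] = buNow  # remember the state the new xs set will reflect
            changed = True
    return changed

oldBurnups = {"AA": 1.00}
print(burnupChanged(oldBurnups, {"AA": 1.05}, tolerance=0.1))  # False: within tolerance
print(burnupChanged(oldBurnups, {"AA": 1.50}, tolerance=0.1))  # True: exceeds tolerance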
Example #2
 def modifyEqPaths(self, modifiedPaths):
     """
     Modifies the geometry object by updating the equilibrium path indices and equilibrium path cycles.
     
     Parameters
     ----------
     modifiedPaths : dict, required
         Dictionary mapping location indices to the equilibrium path data:
         modifiedPaths[indices] = (eqPathIndex, eqPathCycle)
     """
     runLog.important("Modifying the equilibrium paths on {}".format(self))
     self.eqPathsHaveBeenModified = True
     self.eqPathInput.update(modifiedPaths)
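Since the method delegates to dict.update, its merge semantics can be shown standalone; the indices and path values below are made up for illustration.

# Existing entries are overwritten; new ones are added.
eqPathInput = {(1, 1): (0, 1)}
modifiedPaths = {(1, 1): (0, 2), (2, 3): (1, 0)}
eqPathInput.update(modifiedPaths)
print(eqPathInput)  # {(1, 1): (0, 2), (2, 3): (1, 0)}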
Example #3
    def convert(self, r=None, converterSettings=None):
        """
        Perform an axial expansion of the core.

        Notes
        -----
        This loops through the fuel blocks, making their height larger by a fraction of
        maxPercent. It reduces the homogenized actinide number densities to conserve
        atoms.

        This is a first approximation: the whole core is adjusted uniformly,
        fuel together with structure and everything else.

        When fuel is locked to clad, this only expands the actinides! So the structural
        materials and sodium stay as they are in terms of density. By growing the mesh,
        we are introducing NEW ATOMS of these guys, thus violating conservation of
        atoms. However, the new ones are effectively piled up on top of the reactor
        where they are neutronically uninteresting.  This approximates fuel movement
        without clad/duct movement.
        """
        adjustFlags = Flags.FUEL | Flags.CLAD if self._fuelLockedToClad else Flags.FUEL
        adjustList = getAxialExpansionNuclideAdjustList(r, adjustFlags)

        runLog.extra(
            "Conserving mass during axial expansion for: {0}".format(str(adjustList))
        )

        # plenum shrinks so we should just measure the fuel height.
        oldMesh = r.core.p.axialMesh
        for a in r.core.getAssemblies(includeBolAssems=True):
            a.axiallyExpand(self._percent, adjustList)

        r.core.p.axialExpansionPercent = self._percent

        if not self._converterSettings["detailedAxialExpansion"]:
            # loop through again now that the reference is adjusted and adjust the non-fuel assemblies.
            refAssem = r.core.refAssem
            axMesh = refAssem.getAxialMesh()
            for a in r.core.getAssemblies(includeBolAssems=True):
                # See ARMI Ticket #112 for explanation of the commented out code
                a.setBlockMesh(
                    axMesh
                )  # , conserveMassFlag=True, adjustList=adjustList)

        r.core.updateAxialMesh()  # floating point correction
        newMesh = r.core.p.axialMesh
        runLog.important(
            "Adjusted full core fuel axial mesh uniformly "
            "{0}% from {1} cm to {2} cm.".format(self._percent, oldMesh, newMesh)
        )
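The atom-conservation step the notes describe reduces to scaling number densities by the inverse of the height change. A standalone sketch with made-up numbers, not the ARMI implementation:

# Grow a block height by `percent` while conserving atoms of the adjusted
# nuclides: atoms ~ density * height, so density must scale by hOld / hNew.
hOld = 100.0         # cm, hypothetical fuel block height
percent = 2.0        # axial expansion in percent
hNew = hOld * (1.0 + percent / 100.0)
nDensOld = 2.23e-2   # atoms/(barn*cm), hypothetical actinide number density
nDensNew = nDensOld * hOld / hNew
print(nDensOld * hOld, nDensNew * hNew)  # equal: atoms (per unit area) are conserved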
Example #4
    def adjustResolution(self, refA):
        """
        Split the blocks in this assembly to have the same mesh structure as refA.
        """
        newBlockStack = []

        newBlocks = 0  # number of new blocks we've added so far.
        for i, b in enumerate(self):
            refB = refA[i + newBlocks]  # pick the ref block that is "supposed to" line up with b.

            # runLog.important('Dealing with {0}, ref b {1}'.format(b,refB))
            if refB.getHeight() == b.getHeight():
                # these blocks line up
                # runLog.important('They are the same.')
                newBlockStack.append(b)
                continue
            elif refB.getHeight() > b.getHeight():
                raise RuntimeError(
                    "can't split {0} ({1}cm) into larger blocks to match ref block {2} ({3}cm)"
                    "".format(b, b.getHeight(), refB, refB.getHeight()))
            else:
                # b is larger than refB. Split b up by splitting it into several smaller
                # blocks of refBs
                heightToChop = b.getHeight()
                heightChopped = 0.0
                # stop when the heights are equal, within floating point tolerance
                while abs(heightChopped - heightToChop) > 1e-5:
                    # update which ref block we're on (does nothing on the first pass)
                    refB = refA[i + newBlocks]
                    newB = copy.deepcopy(b)
                    newB.setHeight(refB.getHeight())  # make block match ref mesh
                    newBlockStack.append(newB)
                    heightChopped += refB.getHeight()
                    newBlocks += 1
                    runLog.important(
                        "Added a new block {0} of height {1}".format(
                            newB, newB.getHeight()))
                    runLog.important("Chopped {0} of {1}".format(
                        heightChopped, heightToChop))
                newBlocks -= 1  # subtract one because we eliminated the original b completely.
        self.removeAll()
        self.spatialGrid = grids.axialUnitGrid(len(newBlockStack))
        for b in newBlockStack:
            self.add(b)
        self.reestablishBlockOrder()
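The chopping loop above, reduced to plain floats; this sketch assumes the reference mesh evenly subdivides the coarse block:

# Split one 30 cm block into pieces matching a 10 cm reference mesh.
refHeights = [10.0, 10.0, 10.0]
heightToChop, heightChopped = 30.0, 0.0
pieces = []
i = 0
while abs(heightChopped - heightToChop) > 1e-5:  # floating-point-safe "not done yet"
    pieces.append(refHeights[i])
    heightChopped += refHeights[i]
    i += 1
print(pieces)  # [10.0, 10.0, 10.0]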
Example #5
def _writeAssemType(oldDB, newDB, typeNames):
    """
    Write TN/assemblies/type strings and TN/reactors/symmetry.

    assemblies/assemType was not present in the old format, so we find blocks/typeNumAssem and map back to assemblies/type.
    """
    if "Materials" not in oldDB:
        runLog.important(
            "oldDB does not have a /Materials group, assuming assembly type set properly"
        )
        return

    ringPosToBlockIndex = {}

    for blockIndex, locationUniqueInt in enumerate(
            oldDB["Materials/Material"][:]):
        # block data is in a constant order, defined by Materials/Material which is an old-style location string
        uniqueIntAsString = "{:9d}".format(locationUniqueInt)
        ring = int(uniqueIntAsString[:-5])
        pos = int(uniqueIntAsString[-5:-2])
        axial = int(uniqueIntAsString[-2:])

        if axial != 0:  # assume the bottom is grid plate and might have incorrect type
            ringPosToBlockIndex[(ring, pos)] = blockIndex

    for timestep in (k for k in newDB.keys() if re.match(r"^\d+$", k)):
        rings = newDB["{}/assemblies/Ring".format(timestep)][:]
        positions = newDB["{}/assemblies/Pos".format(timestep)][:]
        assemTypes = []

        # so, we could use newDB['{}/blocks/assemType'], which would be more direct, but computing from the original may
        # be more forward compatible in that there may not be a need for blocks to have assemType in the future
        blockTypeNums = oldDB["{}/blocks/typeNumAssem".format(timestep)][:]
        fullCore = False
        for ring, pos in zip(rings, positions):
            if (ring, pos) == (3, 5):
                fullCore = True
            assemTypes.append(typeNames["typeNumAssem"][blockTypeNums[
                ringPosToBlockIndex[ring, pos]]])

        runLog.important("writing {}/assemblies/type".format(timestep))
        newDB["{}/assemblies/type".format(timestep)] = assemTypes

        # dynamically determining the symmetry is a bit awkward, since we have no way to know what the boundary
        # condition was without the original inputs (presumably they exist...)
        symmetry = (geometry.FULL_CORE if fullCore else geometry.THIRD_CORE +
                    geometry.PERIODIC)
        newDB["{}/reactors/symmetry".format(timestep)] = [symmetry]
        runLog.warning("Determined {}/reactors/symmetry to be `{}`.".format(
            timestep, symmetry))
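The old-style location integer packs ring, position, and axial index into fixed-width decimal fields; a small decoding sketch with a made-up value:

# Decode a packed location int laid out as ring * 10**5 + pos * 10**2 + axial.
locationUniqueInt = 300542  # hypothetical: ring 3, pos 5, axial 42
uniqueIntAsString = "{:9d}".format(locationUniqueInt)
ring = int(uniqueIntAsString[:-5])
pos = int(uniqueIntAsString[-5:-2])
axial = int(uniqueIntAsString[-2:])
print(ring, pos, axial)  # 3 5 42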
Example #6
    def _getSpecificReferrers(self, klass, ancestorKlass):
        """Try to determine some useful information about the structure of ArmiObjects and potential
        orphans.

        This takes a class and an expected/nominal parent class, which should both be instances of
        ArmiObject. It will then locate all instances of klass that are tracked by the GC, igoring
        those that have an ancestor of ancestorKlass type. A report will be generated containing
        the counts of the instances of klass that are _not_ part of the ancestor_class along with
        their referrer class.

        This is useful for diagnosing memory leaks, as it points to unexpected referrers to
        ArmiObjects.
        """
        if not issubclass(klass, ArmiObject) or not issubclass(
            ancestorKlass, ArmiObject
        ):
            raise TypeError(
                "klass and ancestorKlass should be subclasses of ArmiObject"
            )

        # info will be a list containing a tuple for every instance of klass that does not have an
        # ancestorKlass somewhere in its chain of parents. Each tuple contains its parent object
        # and the set of classes of objects that refer to it
        info = []
        nominalCount = 0
        exampleObj = None
        maxObjects = 100
        objectsSoFar = 0
        for obj in (o for o in gc.get_objects() if isinstance(o, klass)):
            if objectsSoFar > maxObjects:
                break

            isNominal = False
            o2 = obj
            while o2.parent is not None:
                if isinstance(o2.parent, ancestorKlass):
                    isNominal = True
                    break
                o2 = o2.parent
            runLog.important("isNominal: {} parent: {}".format(isNominal, obj.parent))
            if isNominal:
                nominalCount += 1
            else:
                exampleObj = obj
                objectsSoFar += 1
                referrers = gc.get_referrers(obj)
                referrerClasses = {type(o) for o in referrers}
                info.append((obj.parent, referrerClasses))

        if exampleObj is not None:
            runLog.important("Walking referrers for {}".format(exampleObj))
            _walkReferrers(exampleObj, maxLevel=8)
            raise RuntimeError("All done")

        runLog.important(
            "List of {} orphaned ArmiObjects (obj.parent, {{referring object "
            "classes}})".format(len(info))
        )
        for item in info:
            runLog.important("{}".format(item))
Example #7
    def buildMacros(
        self,
        lib=None,
        bListSome=None,
        buildScatterMatrix=True,
        buildOnlyCoolant=False,
        libType="micros",
    ):
        """
        Builds block-level macroscopic cross sections for making diffusion equation matrices.

        This will use MPI if armi.context.MPI_SIZE > 1

        Builds G-vectors of the basic XS ('nGamma','fission','nalph','np','n2n','nd','nt')
        Builds GxG matrices for scatter matrices

        Parameters
        ----------
        lib : library object, optional
            If lib is specified, then buildMacros will build macro XS using micro XS data from lib.
            If lib is None, then buildMacros will use the existing library self.r.core.lib. If that does
            not exist, then buildMacros will use a new nuclearDataIO.ISOTXS object.

        buildScatterMatrix : Boolean, optional
            If True, all macro XS will be built, including the time-consuming scatter matrix.
            If False, only the macro XS that are needed for fluxRecon.computePinMGFluxAndPower
            will be built. These include 'transport', 'fission', and a few others. No ng x ng
            matrices (such as 'scatter' or 'chi') will be built. Essentially, this option
            saves huge runtime for the fluxRecon module.

        buildOnlyCoolant : Boolean, optional
            If True, homogenized macro XS will be built only for NA-23.
            If False, the function runs normally.

        libType : str, optional
            The block attribute containing the desired microscopic XS for this block:
            either "micros" for neutron XS or "gammaXS" for gamma XS.

        """
        cycle = self.r.p.cycle
        self.macrosLastBuiltAt = (
            sum([self.r.o.burnSteps[i] + 1 for i in range(cycle)]) + self.r.p.timeNode
        )

        runLog.important("Building macro XS")
        xsGen = MacroXSGenerator(bListSome, lib, buildScatterMatrix,
                                 buildOnlyCoolant, libType)
        xsGen.broadcast()
        xsGen.invoke(self.o, self.r, self.cs)
Example #8
    def updatePhysicsCouplingControl(self):
        """
        Disable XS update in equilibrium cases after a while.

        Notes
        -----
        This is only relevant for equilibrium cases. We have to turn
        off XS updates after several cyclics or else the number densities
        will never converge.
        """
        if self.r.core.p.cyclics >= self.cs["numCyclicsBeforeStoppingXS"]:
            self.enabled(False)
            runLog.important(
                "Disabling {} because numCyclics={}".format(self, self.r.core.p.cyclics)
            )
Example #9
 def _reactorAssemblyTrackingBreakdown(self):
     runLog.important("Reactor attribute ArmiObject tracking count")
     for attrName, attrObj in self.r.core.__dict__.items():
         if (isinstance(attrObj, list) and attrObj
                 and isinstance(attrObj[0], ArmiObject)):
             runLog.important("List {:30s} has {:4d} assemblies".format(
                 attrName, len(attrObj)))
         if (isinstance(attrObj, dict) and attrObj
                 and isinstance(list(attrObj.values())[0], ArmiObject)):
             runLog.important("Dict {:30s} has {:4d} assemblies".format(
                 attrName, len(attrObj)))
     runLog.important("SFP has {:4d} assemblies".format(len(
         self.r.core.sfp)))
     runLog.important("CFP has {:4d} assemblies".format(len(
         self.r.core.cfp)))
Example #10
 def invoke(self):
     with directoryChangers.DirectoryChanger(self.args.suiteDir,
                                             dumpOnException=False):
         suite = cases.CaseSuite(self.cs)
         suite.discover(patterns=self.args.patterns,
                        ignorePatterns=self.args.ignore)
         if self.args.list:
             suite.echoConfiguration()
         else:
             for ci, case in enumerate(suite):
                 runLog.important(
                     f"Running case {ci+1}/{len(suite)}: {case}")
                 with directoryChangers.DirectoryChanger(case.directory):
                     settings.setMasterCs(case.cs)
                     case.run()
Example #11
def _setBlueprintNumberOfAxialMeshes(meshPoints, factor):
    """
    Set the number of axial mesh points in the blueprints based on the axial mesh refinement factor.
    """
    if factor <= 0:
        raise ValueError("A positive axial mesh refinement factor "
                         f"must be provided. A value of {factor} is invalid.")

    if factor != 1:
        runLog.important(
            "An axial mesh refinement factor of {} is applied "
            "to blueprint based on setting specification.".format(factor),
            single=True,
        )
    return int(meshPoints) * factor
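The refinement rule itself is just multiplication behind a validation guard; a self-contained sketch with a hypothetical name and without the logging:

def refineAxialMesh(meshPoints, factor):
    if factor <= 0:
        raise ValueError("A positive axial mesh refinement factor "
                         f"must be provided. A value of {factor} is invalid.")
    return int(meshPoints) * factor

print(refineAxialMesh(10, 2))  # 20
print(refineAxialMesh(10, 1))  # 10 (a factor of 1 means no refinement)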
Example #12
    def applyUniformMesh(sourceAssem, newMesh):
        newAssem = UniformMeshGeometryConverter._createNewAssembly(sourceAssem)
        bottom = 0.0
        for topMeshPoint in newMesh:
            overlappingBlockInfo = sourceAssem.getBlocksBetweenElevations(
                bottom, topMeshPoint)
            if not overlappingBlockInfo:
                # this could be handled by duplicating the block but with the current CR module
                # this situation should just never happen.
                raise RuntimeError(
                    "No block found between {:.3f} and {:.3f} in assembly {}"
                    "".format(bottom, topMeshPoint, sourceAssem))

            # If FUEL or CONTROL blocks overlap the other blocks, the first one is
            # selected to ensure the correct XS ID is applied to the new block during the deepcopy.
            sourceBlock = None
            specialXSType = None
            for potentialBlock, _overlap in overlappingBlockInfo:
                if sourceBlock is None:
                    sourceBlock = potentialBlock
                if (potentialBlock.hasFlags([Flags.FUEL, Flags.CONTROL])
                        and potentialBlock != sourceBlock):
                    runLog.important(
                        "There are multiple overlapping blocks.  Choosing {} for {} XS sets."
                        .format(potentialBlock.getType(),
                                sourceBlock.getType()))
                    if specialXSType is None:
                        sourceBlock = potentialBlock
                        specialXSType = sourceBlock.p.xsType
                    elif specialXSType == potentialBlock.p.xsType:
                        pass
                    else:
                        runLog.error(
                            "There are two special block XS types.  Not sure which to choose {} {}"
                            "".format(sourceBlock, potentialBlock))
                        raise RuntimeError(
                            "There are multiple special block XS types when there should only be one"
                        )

            block = copy.deepcopy(sourceBlock)
            block.setHeight(topMeshPoint - bottom)
            block.p.axMesh = 1
            _setNumberDensitiesFromOverlaps(block, overlappingBlockInfo)
            newAssem.add(block)
            bottom = topMeshPoint
        newAssem.reestablishBlockOrder()
        newAssem.calculateZCoords()
        return newAssem
Example #13
    def run(self):
        """
        Run each case, one after the other.

        .. warning:: Suite running may not work yet if the cases have interdependencies.
                    We typically run on a HPC but are still working on a platform
                    independent way of handling HPCs.

        """
        for ci, case in enumerate(self):
            runLog.important(f"Running case {ci+1}/{len(self)}: {case}")
            with directoryChangers.DirectoryChanger(case.directory):
                settings.setMasterCs(case.cs)
                try:
                    case.run()
                except:  # pylint: disable=bare-except; allow it at this level to run all cases
                    # assumed continuation: report the failure and move on to the next case
                    runLog.error("Case {} failed".format(case))
Example #14
    def run(self):
        """Perform DRAGON calculation for the current input file."""
        runLog.important(
            "Preparing to run DRAGON with executable: "
            f"{self.options.executablePath}, on input: {self.options.inputFile}"
        )
        self.writeInput()

        inputs, outputs = self._collectIONames()

        with directoryChangers.ForcedCreationDirectoryChanger(
            self.options.runDir,
            filesToMove=inputs,
            filesToRetrieve=outputs,
        ):
            self._execute()
Example #15
    def displayMemoryUsage(self, timeDescription):
        r"""
        Print out some information to stdout about the memory usage of ARMI.

        Makes use of the asizeof utility.

        Useful when the debugMem setting is set to True.

        Turn these on as appropriate to find all your problems.
        """
        runLog.important(
            "----- Memory Usage Report at {} -----".format(timeDescription))
        self._printFullMemoryBreakdown(startsWith="",
                                       reportSize=self.cs["debugMemSize"])
        self._reactorAssemblyTrackingBreakdown()
        runLog.important("----- End Memory Usage Report at {} -----".format(
            timeDescription))
Example #16
    def summarizeDesign(self, generateFullCoreMap=True, showBlockAxialMesh=True):
        """Uses the ReportInterface to create a fancy HTML page describing the design inputs."""
        settings.setMasterCs(self.cs)
        o = self.initializeOperator()
        with DirectoryChanger(self.cs.inputDirectory):
            # There are global variables that are modified when a report is
            # generated, so reset it all
            six.moves.reload_module(report)  # pylint: disable=too-many-function-args
            self.cs.setSettingsReport()
            rpi = o.getInterface("report")

            if rpi is None:
                rpi = reportInterface.ReportInterface(o.r, o.cs)

            rpi.generateDesignReport(generateFullCoreMap, showBlockAxialMesh)
            report.DESIGN.writeHTML()
            runLog.important("Design report summary was successfully generated")
Example #17
    def growToFullCore(self):
        """
        Convert geometry input to full core.

        Notes
        -----
        This only works for Hex 1/3rd core geometry inputs.
        """
        if self.symmetry.domain == geometry.DomainType.FULL_CORE:
            # already full core from geometry file. No need to copy symmetry over.
            runLog.important(
                "Detected that full core geometry already exists. Cannot expand."
            )
            return
        elif (
            self.symmetry.domain != geometry.DomainType.THIRD_CORE
            or self.symmetry.boundary != geometry.BoundaryType.PERIODIC
        ):
            raise ValueError(
                "Cannot convert shape `{}` to full core, must be {}".format(
                    self.symmetry.domain,
                    str(
                        geometry.SymmetryType(
                            geometry.DomainType.THIRD_CORE,
                            geometry.BoundaryType.PERIODIC,
                        )
                    ),
                ),
            )

        grid = grids.HexGrid.fromPitch(1.0)
        grid._symmetry: str = str(self.symmetry)

        # need to cast to a list because we will modify during iteration
        for (ring, pos), specifierID in list(self.assemTypeByIndices.items()):
            indices = grids.HexGrid.getIndicesFromRingAndPos(ring, pos)
            for symmetricI, symmetricJ in grid.getSymmetricEquivalents(indices):
                symmetricRingPos = grids.HexGrid.indicesToRingPos(
                    symmetricI, symmetricJ
                )
                self.assemTypeByIndices[symmetricRingPos] = specifierID

        self.symmetry = geometry.SymmetryType(
            geometry.DomainType.FULL_CORE,
            geometry.BoundaryType.NO_SYMMETRY,
        )
Example #18
    def _activateDB(self):
        """
        Instantiate the database state.

        Notes
        -----
        This happens here rather than on the database interface, as the database
        interacts near the end of the stack. Some interactBOL methods may be
        dependent on having data in the database, such as calls to history tracker
        during a restart run.
        """
        dbi = self.o.getInterface("database")
        if not dbi.enabled():
            return
        dbi.initDB()
        if (
            self.cs["loadStyle"] != "fromInput"
            and self.cs["runType"] != operators.RunTypes.SNAPSHOTS
        ):
            # load case before going forward with normal cycle
            runLog.important("MainInterface loading from DB")

            # Load the database from the point just before start cycle and start node,
            # as the run will continue at the beginning of start cycle and start node,
            # and the database contains the values from the run at the end of the
            # interface stack, which are what the start cycle and start node
            # should begin with.
            dbCycle, dbNode = utils.getPreviousTimeStep(
                self.cs["startCycle"], self.cs["startNode"], self.cs["burnSteps"]
            )
            try:
                # NOTE: this should be the responsibility of the database, but cannot
                # because the Database is last in the stack and the MainInterface is
                # first
                dbi.prepRestartRun(dbCycle, dbNode)
            except:
                runLog.error(
                    "Could not load the initial state as requested. DB `{}` does "
                    "not exist or does not have enough time steps to load this time "
                    "(cycle={}, tn={})"
                    "".format(self.cs["reloadDBName"], dbCycle, dbNode)
                )
                raise
            self.r.p.cycle = self.cs["startCycle"]
            self.r.p.timeNode = self.cs["startNode"]
Example #19
    def writeGeom(self, outputFileName, suffix=""):
        """
        Write data out as a geometry XML file.

        Parameters
        ----------
        outputFileName : str
            Geometry file name

        suffix : str
            Added suffix to the geometry output file name

        """
        if suffix:
            self._getModifiedFileName(outputFileName, suffix)
            outputFileName = self.modifiedFileName

        runLog.important(
            "Writing reactor geometry file as {}".format(outputFileName))
        root = ET.Element(
            INP_SYSTEMS,
            attrib={
                INP_GEOM: str(self.geomType),
                INP_SYMMETRY: str(self.symmetry),
            },
        )
        tree = ET.ElementTree(root)
        # start at ring 1 pos 1 and go out
        for targetIndices in sorted(list(self.assemTypeByIndices)):
            ring, pos = targetIndices
            assembly = ET.SubElement(root, "assembly")
            assembly.set("ring", str(ring))
            assembly.set("pos", str(pos))
            fuelPath, fuelCycle = self.eqPathInput.get((ring, pos),
                                                       (None, None))
            if fuelPath is not None:
                # set the equilibrium shuffling info if it exists
                assembly.set(INP_FUEL_PATH, str(fuelPath))
                assembly.set(INP_FUEL_CYCLE, str(fuelCycle))

            aType = self.assemTypeByIndices[targetIndices]
            assembly.set("name", aType)
        # note: This is ugly and one-line, but that's ok
        # since we're transitioning.
        tree.write(outputFileName)
Example #20
def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    This method adds DUMMY nuclides to the current GAMISO library.

    Parameters
    ----------
    lib : obj
        GAMISO library object

    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the GAMISO file

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to provide a
    consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
    """
    if not dummyNuclides:
        runLog.important(
            "No dummy nuclide data provided to be added to {}".format(lib))
        return False
    if len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to GAMISO library {} containing data for more than 1 XS ID."
            .format(lib))
        return False
    dummyNuclideKeysAddedToLibrary = []
    for dummyNuclide in dummyNuclides:
        dummyKey = dummyNuclide.nucLabel + lib.xsIDs[0]
        if dummyKey in lib:
            continue
        runLog.debug("Adding {} nuclide data to {}".format(dummyKey, lib))
        newDummy = xsNuclides.XSNuclide(lib, dummyKey)
        # Copy gamiso metadata from the isotxs metadata of the given dummy nuclide
        for kk, vv in dummyNuclide.isotxsMetadata.items():
            if kk in ["jj", "jband"]:
                newDummy.gamisoMetadata[kk] = {}
                for mm in dummyNuclide.isotxsMetadata[kk]:
                    newDummy.gamisoMetadata[kk][mm] = 1
            else:
                newDummy.gamisoMetadata[kk] = vv
        lib[dummyKey] = newDummy
        dummyNuclideKeysAddedToLibrary.append(dummyKey)

    return any(dummyNuclideKeysAddedToLibrary)
Example #21
def writeCycleSummary(core):
    r"""Prints a cycle summary to the runLog

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    ## would io be worth considering for this?
    cycle = core.r.p.cycle
    str_ = []
    runLog.important("Cycle {0} Summary:".format(cycle))
    avgBu = core.calcAvgParam("percentBu", typeSpec=Flags.FUEL, generationNum=2)
    str_.append("Core Average Burnup: {0}".format(avgBu))
    str_.append("Idealized Outlet Temperature {}".format(core.p.THoutletTempIdeal))
    str_.append("End of Cycle {0:02d}. Timestamp: {1} ".format(cycle, time.ctime()))

    runLog.info("\n".join(str_))
Example #22
def addDummyNuclidesToLibrary(lib, dummyNuclides):
    """
    This method adds DUMMY nuclides to the current PMATRX library.

    Parameters
    ----------
    lib : obj
        PMATRX library object

    dummyNuclides: list
        List of DUMMY nuclide objects that will be copied and added to the PMATRX file

    Notes
    -----
    Since MC2-3 does not write DUMMY nuclide information for PMATRX files, this is necessary to provide a
    consistent set of nuclide-level data across all the nuclides in a
    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.
    """
    if not dummyNuclides:
        runLog.important("No dummy nuclide data provided to be added to {}".format(lib))
        return False
    if len(lib.xsIDs) > 1:
        runLog.warning(
            "Cannot add dummy nuclide data to PMATRX library {} containing data for more than 1 XS ID.".format(
                lib
            )
        )
        return False
    dummyNuclideKeysAddedToLibrary = []
    for dummy in dummyNuclides:
        dummyKey = dummy.nucLabel + lib.xsIDs[0]
        if dummyKey in lib:
            continue
        runLog.debug("Adding {} nuclide data to {}".format(dummyKey, lib))
        newDummy = xsNuclides.XSNuclide(lib, dummyKey)
        newDummy.pmatrxMetadata["hasNeutronHeatingAndDamage"] = False
        newDummy.pmatrxMetadata["maxScatteringOrder"] = 0
        newDummy.pmatrxMetadata["hasGammaHeating"] = False
        newDummy.pmatrxMetadata["numberNeutronXS"] = 0
        newDummy.pmatrxMetadata["collapsingRegionNumber"] = 0
        lib[dummyKey] = newDummy
        dummyNuclideKeysAddedToLibrary.append(dummyKey)

    return any(dummyNuclideKeysAddedToLibrary)
Example #23
    def _checkForDuplicateObjectsOnArmiModel(self, attrName, refObject):
        """
        Scans thorugh ARMI model for duplicate objects
        """
        if self.r is None:
            return
        uniqueIds = set()
        uniqueObjTypes = set()

        def checkAttr(subObj):
            if getattr(subObj, attrName, refObject) != refObject:
                uniqueIds.add(id(getattr(subObj, attrName)))
                uniqueObjTypes.add(subObj.__class__.__name__)

        for a in self.r.core.getAssemblies(includeAll=True):
            checkAttr(a)
            for b in a:
                checkAttr(b)
                for c in b:
                    checkAttr(c)
                    checkAttr(c.material)

        for i in self.o.getInterfaces():
            checkAttr(i)
            if i.name == "xsGroups":
                for _, block in i.representativeBlocks.items():
                    checkAttr(block)

        if len(uniqueIds) == 0:
            runLog.important(
                "There are no duplicate `.{}` attributes".format(attrName))
        else:
            runLog.error(
                "There are {} unique objects stored as `.{}` attributes!\n"
                "Expected id {}, but got {}.\nExpected object:{}\n"
                "These types of objects had unique attributes: {}".format(
                    len(uniqueIds) + 1,
                    attrName,
                    id(refObject),
                    uniqueIds,
                    refObject,
                    ", ".join(uniqueObjTypes),
                ))
            raise RuntimeError
Example #24
def compareNuclideXS(nuc1, nuc2):
    equal = nuc1.pmatrxMetadata.compare(nuc2.pmatrxMetadata, nuc1.container,
                                        nuc2.container)
    for attrName in [
            "neutronHeating",
            "neutronDamage",
            "gammaHeating",
            "isotropicProduction",
            "linearAnisotropicProduction",
            "nOrderProductionMatrix",
    ]:
        val1 = getattr(nuc1, attrName)
        val2 = getattr(nuc2, attrName)
        if not properties.numpyHackForEqual(val1, val2):
            runLog.important(
                "{} and {} have different `{}` attributes:\n{}\n{}".format(
                    nuc1, nuc2, attrName, val1, val2))
            equal &= False
    return equal
Example #25
def _copyValidDatasets(newDB, typeNames, name, dataset):
    if isinstance(dataset, h5py.Group):
        runLog.important("Skipping Group {}".format(dataset))
        return

    elif name.startswith("StringMappings"):
        runLog.important("Skipping dataset {}".format(dataset))
        return

    runLog.important("Visiting Dataset {}".format(name))
    try:
        # '0/blocks/nNa' -> _node = '0', paramType = 'blocks', paramName = 'nNa'
        node, paramType, paramName = name.split("/")
        validParameters = VALID_PARAMETERS_BY_GROUP[paramType]

        if validParameters is None:
            runLog.warning(
                "Unexpected entry in database `{}` being copied.".format(name))

        elif paramName in parameters.RENAMES:
            runLog.important("Renaming `{}` -> `{}`.".format(
                paramName, parameters.RENAMES[paramName]))
            paramName = parameters.RENAMES[paramName]

        elif paramName in {"typeNumBlock"}:
            newName = "type"
            runLog.important("Renaming `{}` -> `{}`.".format(
                paramName, newName))
            newDB["{}/{}/{}".format(node, paramType, newName)] = [
                typeNames[paramName][oldVal] for oldVal in dataset.value[:]
            ]
            return

        elif paramName not in validParameters:
            runLog.warning(
                "Invalid Parameter {} in the Database. Deleting Parameter `{}` from Dataset `{}`"
                .format(paramName, paramName, dataset.name))
            return

        newDB.copy(dataset, "{}/{}/{}".format(node, paramType, paramName))
    except ValueError:
        # Skip checking for an invalid parameter if the structure is not correct (i.e., not length 3)
        newDB.copy(dataset, dataset.name)
Example #26
    def _newLibraryShouldBeCreated(self, cycle, representativeBlockList, xsIDs):
        """
        Determines whether the cross section generator should be executed at this cycle.

        Criteria include:

            #. genXS setting is turned on
            #. We are beyond any requested skipCycles (restart cycles)
            #. The blocks have changed burnup beyond the burnup threshold
            #. Lattice physics kernel (e.g. MC2) hasn't already been executed for this cycle
               (possible if it runs during fuel handling)

        """
        executeXSGen = bool(self.cs["genXS"] and cycle >= self.cs["skipCycles"])
        idsChangedBurnup = self._checkBurnupThresholds(representativeBlockList)
        if executeXSGen and not idsChangedBurnup:
            executeXSGen = False

        if self.r.core._lib is not None:  # pylint: disable=protected-access
            # justification: the r.core.lib property can raise an exception or load pre-generated
            # ISOTXS, but the interface should have the responsibility of loading.
            # XS's have already been generated for this cycle (maybe during fuel management).
            # Should we update due to changes that occurred during fuel management?
            missing = set(xsIDs) - set(self.r.core.lib.xsIDs)
            if missing and not executeXSGen:
                runLog.warning(
                    "Even though XS generation is not activated, new XS {0} are needed. "
                    "Perhaps a booster came in.".format(missing)
                )
            elif missing:
                runLog.important(
                    "New XS sets {0} will be generated for this cycle".format(missing)
                )
            else:
                runLog.important(
                    "No new XS needed for this cycle. {0} exist. Skipping".format(
                        self.r.core.lib.xsIDs
                    )
                )
                executeXSGen = False  # no newXs

        return executeXSGen
Example #27
    def count(self):
        if not self.getChildren():
            return
        runLog.important("Count:")
        totCount = 0
        thisTimeCount = 0
        a = self.getChildren()[0]
        lastTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime

        for a in self.getChildren():
            thisTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime

            if thisTime != lastTime:
                runLog.important(
                    "Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}"
                    .format(lastTime, thisTimeCount, totCount))
                lastTime = thisTime
                thisTimeCount = 0
            totCount += 1
            thisTimeCount += 1
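The counting logic amounts to tallying consecutive runs of equal charge times. A standalone sketch with made-up times; note the method above only reports a run once a later time appears, so the sketch adds a final flush:

# Tally assembly moves grouped by identical charge time, as in the loop above.
times = [1.0, 1.0, 2.5, 2.5, 2.5]
totCount = thisTimeCount = 0
lastTime = times[0]
for t in times:
    if t != lastTime:
        print("Moved at t={:6.2f}: {:04d}. Cumulative: {:04d}".format(
            lastTime, thisTimeCount, totCount))
        lastTime, thisTimeCount = t, 0
    totCount += 1
    thisTimeCount += 1
print("Moved at t={:6.2f}: {:04d}. Cumulative: {:04d}".format(
    lastTime, thisTimeCount, totCount))  # flush the final group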
Example #28
    def compare(self) -> int:
        """Compare the source to the reference database, updating self.results with the findings"""
        runLog.important("Comparing databases \n:REF: {} \n:SRC: {}"
                         "".format(repr(self.ref), repr(self.src)))

        self.differences = _DifferencesOutputer()

        try:
            srcTS, refTS = self._compareTimeSteps()
            self._compareReactorStates(srcTS, refTS)
            self._compareAssemblyStates(srcTS, refTS)
            self._compareBlockStates(srcTS, refTS)
        except exceptions.NoDataModelInDatabaseException:
            msg = "Missing or incomplete data model in one or both databases."
            runLog.important(msg)
            self.differences.errors.append(msg)

        self._compareMiscData()

        return self.differences.nDifferences()
Example #29
 def cleanLastCycleFiles(self):
     r"""Delete ARMI files from previous cycle that aren't necessary for the next cycle.
     Unless you're doing reloads, of course."""
     runLog.important("Cleaning ARMI files due to reallySmallRun option")
     for fileName in os.listdir(os.getcwd()):
         # clean MC**2 and REBUS inputs and outputs
         for candidate in [".BCD", ".inp", ".out", "ISOTXS-"]:
             if candidate in fileName:
                 # Do not remove .htos.out files.
                 if ".htos.out" in fileName:
                     continue
                 if re.search(r"mcc[A-Z0-9]+\.inp", fileName):
                     continue
                 # don't remove mccIA1.inp stuff in case we go out of a burnup bound.
                 try:
                     os.remove(fileName)
                 except OSError:
                     runLog.warning(
                         "Error removing file {0} during cleanup. It is still in use,"
                         " probably".format(fileName))
Example #30
def summarizePower(core):
    r"""provide an edit showing where the power is based on assembly types.

    Parameters
    ----------
    core : armi.reactor.reactors.Core
    """
    sums = collections.defaultdict(lambda: 0.0)
    pmult = core.powerMultiplier
    for a in core:
        sums[a.getType()] += a.calcTotalParam("power") * pmult

    # calculate total power
    tot = sum(sums.values()) or float("inf")
    ## NOTE: if tot is 0.0, set to infinity to prevent ZeroDivisionError

    runLog.important("Power summary")
    for atype, val in sums.items():
        runLog.important(" Power in {0:35s}: {1:0.3E} Watts, {2:0.5f}%".format(
            atype, val, val / tot * 100))
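The aggregation pattern on toy data, including the zero-total guard; the assembly types and powers are invented:

import collections

sums = collections.defaultdict(lambda: 0.0)
for aType, power in [("igniter fuel", 3.0e6), ("igniter fuel", 2.0e6), ("control", 5.0e5)]:
    sums[aType] += power

tot = sum(sums.values()) or float("inf")  # inf guard avoids ZeroDivisionError
for atype, val in sums.items():
    print(" Power in {0:35s}: {1:0.3E} Watts, {2:0.5f}%".format(
        atype, val, val / tot * 100))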