Example #1
    def test_setVerbosity(self):
        """Let's test the setVerbosity() method carefully"""
        with mockRunLogs.BufferLog() as mock:
            # we should start with a clean slate
            self.assertEqual("", mock._outputStream)
            runLog.LOG.startLog("test_setVerbosity")
            runLog.LOG.setVerbosity(logging.INFO)

            # we should start at info level, and that should be working correctly
            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)
            runLog.info("hi")
            self.assertIn("hi", mock._outputStream)
            mock._outputStream = ""

            runLog.debug("invisible")
            self.assertEqual("", mock._outputStream)

            # setVerbosity() to WARNING, and verify it is working
            runLog.LOG.setVerbosity(logging.WARNING)
            runLog.info("still invisible")
            self.assertEqual("", mock._outputStream)
            runLog.warning("visible")
            self.assertIn("visible", mock._outputStream)
            mock._outputStream = ""

            # setVerbosity() to DEBUG, and verify it is working
            runLog.LOG.setVerbosity(logging.DEBUG)
            runLog.debug("Visible")
            self.assertIn("Visible", mock._outputStream)
            mock._outputStream = ""

            # setVerbosity() to ERROR, and verify it is working
            runLog.LOG.setVerbosity(logging.ERROR)
            runLog.warning("Still Invisible")
            self.assertEqual("", mock._outputStream)
            runLog.error("Visible!")
            self.assertIn("Visible!", mock._outputStream)

            # we shouldn't be able to setVerbosity() to a non-canonical value (logging module defense)
            self.assertEqual(runLog.LOG.getVerbosity(), logging.ERROR)
            runLog.LOG.setVerbosity(logging.WARNING + 1)
            self.assertEqual(runLog.LOG.getVerbosity(), logging.WARNING)
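
The gating behavior exercised by this test mirrors the standard library's logging levels, since the test drives runLog with constants like logging.INFO. A minimal self-contained sketch using only stdlib logging (names like demoLog are illustrative, not ARMI API):

import io
import logging

stream = io.StringIO()
demoLog = logging.getLogger("verbosityDemo")
demoLog.addHandler(logging.StreamHandler(stream))
demoLog.propagate = False  # keep the demo's output away from the root logger

demoLog.setLevel(logging.WARNING)
demoLog.info("filtered out")  # below WARNING: dropped
demoLog.warning("emitted")    # at WARNING: written to the stream
assert "filtered out" not in stream.getvalue()
assert "emitted" in stream.getvalue()

demoLog.setLevel(logging.DEBUG)
demoLog.debug("now visible")  # passes once the threshold is lowered
assert "now visible" in stream.getvalue()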
Example #2
    def invoke(self):
        from armi.bookkeeping.db.database3 import Database3

        if all(li is None for li in
               [self.args.blueprints, self.args.geom, self.args.settings]):
            runLog.error(
                "No settings, blueprints, or geometry files specified; "
                "nothing to do.")
            return -1

        bp = None
        settings = None
        geom = None

        if self.args.blueprints is not None:
            bp = resolveMarkupInclusions(pathlib.Path(
                self.args.blueprints)).read()

        if self.args.geom is not None:
            with open(self.args.geom, "r") as f:
                geom = f.read()

        if self.args.settings is not None:
            settings = resolveMarkupInclusions(pathlib.Path(
                self.args.settings)).read()

        db = Database3(self.args.h5db, "a")

        with db:
            # Not calling writeInputsToDb, since it makes too many assumptions about
            # where the inputs are coming from, and which ones we want to write.
            # Instead, we assume that we know where to store them, and do it ourselves.
            for data, key in [
                (bp, "blueprints"),
                (geom, "geomFile"),
                (settings, "settings"),
            ]:
                if data is not None:
                    dSetName = "inputs/" + key
                    if dSetName in db.h5db:
                        del db.h5db[dSetName]
                    db.h5db[dSetName] = data
Example #3
    def invoke(self):
        from armi import cases

        if not os.path.exists(self.args.reference):
            runLog.error("Could not find reference directory {}".format(
                self.args.reference))
            sys.exit(1)

        if not os.path.exists(self.args.comparison):
            runLog.error("Could not find comparison directory {}".format(
                self.args.comparison))
            sys.exit(1)

        refSuite = cases.CaseSuite(self.cs)

        # gather every pattern the user requested, including additional comparisons
        allTests = self.args.patterns + self.args.additional_comparisons
        refSuite.discover(
            rootDir=self.args.reference,
            patterns=allTests,
            ignorePatterns=self.args.ignore,
        )

        cmpSuite = cases.CaseSuite(self.cs)
        cmpSuite.discover(
            rootDir=self.args.comparison,
            patterns=self.args.patterns,
            ignorePatterns=self.args.ignore,
        )

        nIssues = refSuite.compare(
            cmpSuite,
            weights=self.args.weights,
            tolerance=self.args.tolerance,
            exclusion=self.args.exclude,
            timestepMatchup=self.args.timestepMatchup,
        )

        if nIssues > 0:
            sys.exit(nIssues)
Example #4
    def _activateDB(self):
        """
        Instantiate the database state.

        Notes
        -----
        This happens here rather than on the database interface, as the database
        interacts near the end of the stack. Some interactBOL methods may be
        dependent on having data in the database, such as calls to history tracker
        during a restart run.
        """
        dbi = self.o.getInterface("database")
        if not dbi.enabled():
            return
        dbi.initDB()
        if (self.cs["loadStyle"] != "fromInput"
                and self.cs["runType"] != operators.RunTypes.SNAPSHOTS):
            # load case before going forward with normal cycle
            runLog.important("MainInterface loading from DB")

            # Load the database from the point just before the start cycle and start
            # node, as the run will continue at the beginning of the start cycle and
            # start node, and the database contains the values from the run at the end
            # of the interface stack, which are what the start cycle and start node
            # should begin with.
            dbCycle, dbNode = utils.getPreviousTimeStep(
                self.cs["startCycle"], self.cs["startNode"],
                self.cs["burnSteps"])
            try:
                # NOTE: this should be the responsibility of the database, but cannot
                # because the Database is last in the stack and the MainInterface is
                # first
                dbi.prepRestartRun(dbCycle, dbNode)
            except:
                runLog.error(
                    "Could not load the initial state as requested. DB `{}` does "
                    "not exist or does not have enough time steps to load this time "
                    "(cycle={}, tn={})"
                    "".format(self.cs["reloadDBName"], dbCycle, dbNode))
                raise
            self.r.p.cycle = self.cs["startCycle"]
            self.r.p.timeNode = self.cs["startNode"]
Example #5
    def _isTopDummyBlockPresent(self):
        """determines if top most block of assembly is a dummy block

        Notes
        -----
        - If true, then axial expansion will be physical for all blocks.
        - If false, the top most block in the assembly is artificially chopped
          to preserve the assembly height. A runLog.Warning also issued.
        """
        blkLst = self.linked.a.getBlocks()
        if not blkLst[-1].hasFlags(Flags.DUMMY):
            runLog.warning(
                "No dummy block present at the top of {0}! "
                "Top most block will be artificially chopped "
                "to preserve assembly height".format(self.linked.a)
            )
            if self._detailedAxialExpansion:
                msg = "Cannot run detailedAxialExpansion without a dummy block at the top of the assembly!"
                runLog.error(msg)
                raise RuntimeError(msg)
Example #6
def deprecateAsymptoticExtrapolationPowerIters(_cs, _name, _value):
    """
    The setting `asymptoticExtrapolationPowerIters` has been deprecated and replaced by
    three settings to remove confusion and ensure proper use.

    The three new settings are:
        - numOuterIterPriorAsympExtrap
        - asympExtrapOfOverRelaxCalc
        - asympExtrapOfNodalCalc
    """
    runLog.error(
        "The setting `asymptoticExtrapolationPowerIters` has been deprecated and replaced "
        "with `numOuterIterPriorAsympExtrap`, `asympExtrapOfOverRelaxCalc`, "
        "`asympExtrapOfNodalCalc`. Please use these settings for intended behavior."
    )

    raise ValueError(
        "Setting `asymptoticExtrapolationPowerIters` has been deprecated. "
        "See stdout for more details."
    )
Example #7
    def _applySettings(self, name, val):
        nameToSet, renamed = self._renamer.renameSetting(name)

        settingsToApply = self.applyConversions(nameToSet, val)
        for settingName, value in settingsToApply.items():
            if settingName not in self.cs.settings:
                self.invalidSettings.add(settingName)
            else:
                # apply validations
                settingObj = self.cs.settings[settingName]
                if value:
                    value = applyTypeConversions(settingObj, value)
                try:
                    value = settingObj.schema(value)
                except:
                    runLog.error(
                        f"Validation error with setting: {settingName} = {repr(value)}"
                    )
                    raise
                self.cs[settingName] = value
Example #8
def _registerUserPlugin(plugManager, userPluginName):
    """Register one individual user plugin by name."""
    try:
        pluginMod = importlib.import_module(userPluginName)
    except ImportError:
        runLog.error(
            f"The plugin `{userPluginName}` could not be imported. Verify it is installed "
            "in your current environment or adjust the active user plugins."
        )
        raise

    # Each plugin must have a constant called PLUGIN pointing to the plugin class.
    # This allows discoverability without being overly restrictive in class names
    try:
        plugManager.register(pluginMod.PLUGIN)
    except AttributeError:
        runLog.error(
            f"The plugin `{userPluginName}` does not have a PLUGIN constant defined. "
            "This constant is required in user plugins. Please adjust plugin."
        )
        raise
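
A user plugin module satisfying the PLUGIN-constant convention described in the comments might look like the following sketch (the module and class names are hypothetical, and armi.plugins.ArmiPlugin as the base class is an assumption; only the module-level PLUGIN attribute is what _registerUserPlugin actually requires):

# myUserPlugin.py -- hypothetical module registered via _registerUserPlugin
from armi import plugins


class MyUserPlugin(plugins.ArmiPlugin):
    """Example user plugin; hook implementations would go here."""


# _registerUserPlugin looks up this module-level constant and registers it.
PLUGIN = MyUserPlugin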
Example #9
def coverageReportHelper(config, dataPaths):
    """
    Small utility function to generate coverage reports.

    This was created to side-step the difficulties in submitting multi-line python
    commands on-the-fly.

    This combines data paths and then makes html and xml reports for the
    fully-combined result.
    """
    from coverage import Coverage
    import coverage

    try:
        cov = Coverage(config_file=config)
        if dataPaths:
            # fun fact: if you combine when there's only one file, it gets deleted.
            cov.combine(data_paths=dataPaths)
            cov.save()
        else:
            cov.load()
        cov.html_report()
        cov.xml_report()
    except PermissionError as e:
        # Some file systems have some issues with filenames that start with a '.', such as the
        # .coverage files. If a permissions error is raised, it likely has something to
        # do with that. We changed the COVERAGE_RESULTS_FILE in cases.py for this reason.
        runLog.error(f"There was an issue in generating coverage reports due "
                     f"to the following permissions error: {e}")
        # disabled until we figure out the problem.
        # raise
    except coverage.misc.CoverageException as e:
        # This is happening when forming the unit test coverage report. This may be
        # caused by the TestFixture coverage report gobbling up all of the coverage
        # files before the UnitTests.cov_report task gets a chance to see them. It may
        # simply be that we don't want a coverage report generated for the TestFixture.
        # Something to think about. Either way, we do not want to fail the job just
        # because of this
        runLog.error("There was an issue generating coverage reports "
                     "({}):\n{}".format(type(e), e.args))
Example #10
    def invoke(self):
        from armi.bookkeeping.db.database3 import Database3

        db = Database3(self.args.h5db, "r")

        with db:
            settings, geom, bp = db.readInputsFromDB()

        settingsExt = ".yaml"
        if settings.lstrip()[0] == "<":
            settingsExt = ".xml"

        settingsPath = self.args.output_base + "_settings" + settingsExt
        bpPath = self.args.output_base + "_blueprints.yaml"

        geomPath = None
        if geom:
            geomExt = ".xml" if geom.lstrip()[0] == "<" else ".yaml"
            geomPath = self.args.output_base + "_geom" + geomExt

        bail = False
        for path in [settingsPath, bpPath, geomPath]:
            if path is not None and os.path.exists(path):
                runLog.error("`{}` already exists. Aborting.".format(path))
                bail = True
        if bail:
            return -1

        for path, data, inp in [
            (settingsPath, settings, "settings"),
            (bpPath, bp, "blueprints"),
            (geomPath, geom, "geometry"),
        ]:
            if path is None:
                continue
            runLog.info("Writing {} to `{}`".format(inp, path))
            if isinstance(data, bytes):
                data = data.decode()
            with open(path, "w") as f:
                f.write(data)
Example #11
def loadFromCs(cs):
    """
    Function to load Blueprints based on supplied ``CaseSettings``.
    """
    # pylint: disable=import-outside-toplevel; circular import protection
    from armi.utils import directoryChangers

    with directoryChangers.DirectoryChanger(cs.inputDirectory,
                                            dumpOnException=False):
        with open(cs["loadingFile"], "r") as bpYaml:
            root = pathlib.Path(cs["loadingFile"]).parent.absolute()
            bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root)
            try:
                bp = Blueprints.load(bpYaml)
            except yamlize.yamlizing_error.YamlizingError as err:
                if "cross sections" in err.args[0]:
                    runLog.error(
                        "The loading file {} contains invalid `cross sections` input. "
                        "Please run the `modify` entry point on this case to automatically convert."
                        "".format(cs["loadingFile"]))
                raise
    return bp
Example #12
    def invoke(self):
        if shutil.which(self.cs[CONF_DIF3D_PATH]) is None:
            runLog.error(
                "The requested DIF3D executable, `{}` cannot be found".format(
                    self.cs[CONF_DIF3D_PATH]
                )
            )
            sys.exit(1)

        if shutil.which(self.cs[CONF_DRAGON_PATH]) is None:
            runLog.error(
                "The requested DRAGON executable, `{}` cannot be found".format(
                    self.cs[CONF_DRAGON_PATH]
                )
            )
            sys.exit(1)

        if not self.args.post_process:
            suite = self._execute()
        else:
            suite = None
        self._postProcess(suite)
Example #13
def factory(shape, bcomps, kwargs):
    """
    Build a new component object.

    Parameters
    ----------
    shape : str
        lowercase string corresponding to the component type name

    bcomps : list(Component)
        list of "sibling" components. This list is used to find component links, which are of the form
        ``<name>.<dimension>``.

    kwargs : dict
        dictionary of inputs for the Component subclass's ``__init__`` method.
    """
    try:
        class_ = ComponentType.TYPES[shape]
    except KeyError:
        raise ValueError(
            "Unrecognized component shape: '{}'\n"
            "Valid component names are {}".format(
                shape, ", ".join(ComponentType.TYPES.keys())
            )
        )

    _removeDimensionNameSpaces(kwargs)

    try:
        return class_(components=bcomps, **kwargs)
    except TypeError:
        # TypeError raised when kwarg is missing. We add extra information
        # to the error to indicate which component needs updating.
        runLog.error(
            f"Potentially invalid kwargs {kwargs} for {class_} of shape {shape}."
            " Check input."
        )
        raise
Example #14
    def readWrite(self):
        runLog.info(
            "{} LABELS data {}".format(
                "Reading" if "r" in self._fileMode else "Writing", self
            )
        )
        try:
            self._rwFileID()
            self._rw1DRecord()
            self._rw2DRecord()
            if (
                self._metadata["numHalfHeightsDirection1"] > 0
                or self._metadata["numHalfHeightsDirection2"] > 0
            ):
                self._rw3DRecord()
            if self._metadata["numNuclideSets"] > 1:
                self._rw4DRecord()
            if self._metadata["numZoneAliases"] > 0:
                self._rw5DRecord()
            if self._metadata["numControlRodBanks"] > 0:
                runLog.error("Control-rod data has not been implemented")
                self._rw6DRecord()
                self._rw7DRecord()
                self._rw8DRecord()
            if self._metadata["numBurnupDependentIsotopes"] > 0:
                runLog.error("Burnup-dependent isotopes have not been implemented")
                self._rw9DRecord()
            if self._metadata["maxBurnupDependentGroups"] > 0:
                runLog.error("Burnup-dependent groups have not been implemented")
                self._rw10DRecord()
            if self._metadata["maxBurnupPolynomialOrder"] > 0:
                runLog.error(
                    "Burnup-dependent fitting coefficients have not been implemented"
                )
                self._rw11DRecord()
        except Exception:
            runLog.error(traceback.format_exc())
            raise IOError("Failed to read/write LABELS file")
Example #15
    def getNuclide(self, nucName, suffix):
        """
        Get a nuclide object from the XS library or None.

        Parameters
        ----------
        nucName : str
            ARMI nuclide name, e.g. 'U235', 'PU239'
        suffix : str
            Restrict to a specific nuclide lib suffix e.g. 'AA'

        Returns
        -------
        nuclide : Nuclide object
            A nuclide from the library or None
        """

        libLabel = nuclideBases.byName[nucName].label + suffix
        try:
            return self[libLabel]
        except KeyError:
            runLog.error("Error in {}.\nSee stderr.".format(self))
            raise
Example #16
def loadFromCs(cs):
    """
    Function to load Blueprints based on supplied ``CaseSettings``.
    """
    # pylint: disable=import-outside-toplevel; circular import protection
    from armi.utils import directoryChangers

    textProcessors.registerYamlIncludeConstructor()

    with directoryChangers.DirectoryChanger(cs.inputDirectory):
        with open(cs["loadingFile"], "r") as bpYaml:
            # Make sure that the !include constructor is registered
            bpYaml = textProcessors.resolveMarkupInclusions(bpYaml)
            try:
                bp = Blueprints.load(bpYaml)
            except yamlize.yamlizing_error.YamlizingError as err:
                if "cross sections" in err.args[0]:
                    runLog.error(
                        "The loading file {} contains invalid `cross sections` input. "
                        "Please run the `modify` entry point on this case to automatically convert."
                        "".format(cs["loadingFile"]))
                raise
    return bp
Example #17
    def _checkAssemblyAreaConsistency(self, cs):
        references = None
        for a in self.assemblies.values():
            if references is None:
                references = (a, a.getArea())
                continue

            assemblyArea = a.getArea()
            if isinstance(a, assemblies.RZAssembly):
                # R-Z assemblies by definition have different areas, so skip the check
                continue
            if abs(references[1] - assemblyArea) > 1e-9:
                runLog.error("REFERENCE COMPARISON ASSEMBLY:")
                references[0][0].printContents()
                runLog.error("CURRENT COMPARISON ASSEMBLY:")
                a[0].printContents()
                raise InputError(
                    "Assembly {} has a different area {} than assembly {} {}.  Check inputs for accuracy"
                    .format(a, assemblyArea, references[0], references[1]))

            blockArea = a[0].getArea()
            for b in a[1:]:
                if (abs(b.getArea() - blockArea) / blockArea >
                        cs["acceptableBlockAreaError"]):
                    runLog.error("REFERENCE COMPARISON BLOCK:")
                    a[0].printContents(includeNuclides=False)
                    runLog.error("CURRENT COMPARISON BLOCK:")
                    b.printContents(includeNuclides=False)

                    for c in b.getChildren():
                        runLog.error("{0} area {1} effective area {2}"
                                     "".format(c, c.getArea(),
                                               c.getVolume() / b.getHeight()))

                    raise InputError(
                        "Block {} has a different area {} than block {} {}. Check inputs for accuracy"
                        .format(b, b.getArea(), a[0], blockArea))
Example #18
    def getThermalExpansionFactor(self, Tc=None, T0=None):
        """
        Retrieves the material thermal expansion fraction.

        Parameters
        ----------
        Tc : float, optional
            Adjusted temperature at which to get the thermal expansion factor, relative to the reference temperature
        T0 : float, optional
            Reference temperature; defaults to the material input temperature

        Returns
        -------
        Thermal expansion factor (1.0 + dLL), where dLL is the linear expansion factor.
        """
        if isinstance(self.material, (material.Fluid, custom.Custom)):
            return 1.0  # No thermal expansion of fluids or custom materials

        if T0 is None:
            T0 = self.inputTemperatureInC
        if Tc is None:
            Tc = self.temperatureInC

        dLL = self.material.linearExpansionFactor(Tc=Tc, T0=T0)
        if not dLL and abs(Tc - T0) > self._TOLERANCE:
            runLog.error(
                "Linear expansion percent may not be implemented in the {} material class.\n"
                "This method needs to be implemented on the material to allow thermal expansion."
                ".\nReference temperature: {}, Adjusted temperature: {}, Temperature difference: {}, "
                "Specified tolerance: {}".format(
                    self.material, T0, Tc, (Tc - T0), self._TOLERANCE
                ),
                single=True,
            )
            raise RuntimeError(
                "Linear expansion percent may not be implemented in the {} material "
                "class.".format(self.material))
        return 1.0 + dLL
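
For scale, a rough numeric sketch of the quantity getThermalExpansionFactor() returns (the expansion coefficient below is a made-up illustrative value, not a real material property):

# Hedged sketch: linear expansion is roughly alpha * dT for modest dT.
alpha = 1.8e-5                # 1/degC, illustrative value only
T0, Tc = 20.0, 500.0          # reference and adjusted temperatures in C
dLL = alpha * (Tc - T0)       # dimensionless linear expansion, ~0.00864
factor = 1.0 + dLL            # the thermal expansion factor, ~1.00864
hotLength = 100.0 * factor    # a 100 cm component expands to ~100.86 cm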
Example #19
    def _checkValidWeightingFactors(self):
        """
        Verify the validity of the weighting parameter.

        .. warning:: Don't mix unweighted blocks (flux=0) w/ weighted ones
        """
        if self.weightingParam is None:
            weights = [0.0] * len(self.getCandidateBlocks())
        else:
            weights = [
                block.p[self.weightingParam] for block in self.getCandidateBlocks()
            ]
        anyNonZeros = any(weights)
        if anyNonZeros and not all(weights):
            # we have at least one non-zero entry and at least one zero. This is bad.
            # find the zero-weight blocks for debugging
            zeros = [block for block in self if not block.p[self.weightingParam]]
            runLog.error(
                "Blocks with zero `{0}` include: {1}".format(self.weightingParam, zeros)
            )
            raise ValueError(
                "{0} has a mixture of zero and non-zero weighting factors (`{1}`)\n"
                "See stdout for details".format(self, self.weightingParam)
            )
Example #20
    def parse(self, args):
        """
        Process user input.

        Strings are parsed against some regular expressions and saved back to their
        original locations in the ``self.args`` namespace for later use.
        """
        entryPoint.EntryPoint.parse(self, args)

        cycleNodePattern = r"\((\d+),(\d+)\)"

        if self.args.nodes is not None:
            self.args.nodes = [(int(cycle), int(node))
                               for cycle, node in re.findall(
                                   cycleNodePattern, self.args.nodes)]

        if self.args.max_node is not None:
            nodes = re.findall(cycleNodePattern, self.args.max_node)
            if len(nodes) != 1:
                runLog.error(
                    "Bad --max-node: `{}`. Should look like (c,n).".format(
                        self.args.max_node))
                sys.exit(1)
            cycle, node = nodes[0]
            self.args.max_node = (int(cycle), int(node))

        if self.args.min_node is not None:
            nodes = re.findall(cycleNodePattern, self.args.min_node)
            if len(nodes) != 1:
                runLog.error(
                    "Bad --min-node: `{}`. Should look like (c,n).".format(
                        self.args.min_node))
                sys.exit(1)
            cycle, node = nodes[0]
            self.args.min_node = (int(cycle), int(node))

        if self.args.format not in self._SUPPORTED_FORMATS:
            runLog.error(
                "Requested format `{}` not among the supported options: {}".format(
                    self.args.format, self._SUPPORTED_FORMATS))
            sys.exit(1)

        if self.args.output_name is None:
            # infer name from input
            inp = pathlib.Path(self.args.h5db)
            self.args.output_name = inp.stem
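
The cycleNodePattern regex above maps strings like "(1,2)(3,0)" to integer pairs; a standalone check of both the --nodes and --max-node parsing paths:

import re

cycleNodePattern = r"\((\d+),(\d+)\)"

# --nodes style input: every (cycle, node) pair is captured
pairs = [(int(c), int(n)) for c, n in re.findall(cycleNodePattern, "(1,2)(3,0)")]
assert pairs == [(1, 2), (3, 0)]

# --max-node / --min-node style input: exactly one pair is expected
nodes = re.findall(cycleNodePattern, "(4,5)")
assert len(nodes) == 1
assert tuple(int(x) for x in nodes[0]) == (4, 5)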
Example #21
    def __exit__(self, exception_type, exception_value, stacktrace):
        if any([exception_type, exception_value, stacktrace]):
            runLog.error("{}\n{}\n{}".format(exception_type, exception_value,
                                             stacktrace))
            self.interactAllError()
Example #22
    def checkSize(self):
        if len(self.E) != len(self.avgFlux):
            runLog.error(self.avgFlux)
            raise ValueError(
                "Energy group bounds and average flux have inconsistent sizes: "
                "{} vs. {}".format(len(self.E), len(self.avgFlux))
            )
Example #23
def computeMacroscopicGroupConstants(
    constantName,
    numberDensities,
    lib,
    microSuffix,
    libType=None,
    multConstant=None,
    multLib=None,
):
    """
    Compute any macroscopic group constants given number densities and a microscopic library.

    Parameters
    ----------
    constantName : str
        Name of the reaction for which to obtain the group constants. This name should match a
        cross section name or an attribute in the collection.

    numberDensities : dict
        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which
        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.

    lib : library object
        Microscopic cross section library.

    microSuffix : str
        Microscopic library suffix (e.g. 'AB') for this composite.
        See composite `getMicroSuffix` method.

    libType : str, optional
        The block attribute containing the desired microscopic XS for this block:
        either "micros" for neutron XS or "gammaXS" for gamma XS.

    multConstant : str, optional
        Name of constant by which the group constants will be multiplied. This name should match a
        cross section name or an attribute in the collection.

    multLib : library object, optional
        Microscopic cross section nuclide library to obtain the multiplier from.
        If None, same library as base cross section is used.

    Returns
    -------
    macroGroupConstant : numpy array
        Macroscopic group constants for the requested reaction.
    """
    skippedNuclides = []
    skippedMultNuclides = []
    macroGroupConstants = None

    # sort the numberDensities because a summation is being performed that may result in slight
    # differences based on the order.
    for nuclideName, numberDensity in sorted(numberDensities.items()):
        if not numberDensity:
            continue
        try:
            libNuclide = lib.getNuclide(nuclideName, microSuffix)
            multLibNuclide = libNuclide
        except KeyError:
            skippedNuclides.append(nuclideName)  # Nuclide does not exist in the library
            continue

        if multLib:
            try:
                multLibNuclide = multLib.getNuclide(nuclideName, microSuffix)
            except KeyError:
                skippedMultNuclides.append(
                    nuclideName
                )  # Nuclide does not exist in the library
                continue

        microGroupConstants = _getMicroGroupConstants(
            libNuclide, constantName, nuclideName, libType
        )

        multiplierVal = _getXsMultiplier(multLibNuclide, multConstant, libType)

        if macroGroupConstants is None:
            macroGroupConstants = numpy.zeros(microGroupConstants.shape)

        if (
            microGroupConstants.shape != macroGroupConstants.shape
            and not microGroupConstants.any()
        ):
            microGroupConstants = numpy.zeros(macroGroupConstants.shape)

        macroGroupConstants += (
            numpy.asarray(numberDensity) * microGroupConstants * multiplierVal
        )

    if skippedNuclides:
        msg = "The following nuclides are not in microscopic library {}: {}".format(
            lib, skippedNuclides
        )
        runLog.error(msg, single=True)
        raise ValueError(msg)

    if skippedMultNuclides:
        runLog.debug(
            "The following nuclides are not in multiplier library {}: {}".format(
                multLib, skippedMultNuclides
            ),
            single=True,
        )

    return macroGroupConstants
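
The accumulation this function performs is the standard mixture rule: the macroscopic constant is the number-density-weighted sum of microscopic constants over nuclides. A stripped-down numpy sketch with made-up two-group numbers:

import numpy

# hypothetical two-group microscopic cross sections (barns) per nuclide
micros = {"U235": numpy.array([2.1, 45.0]), "U238": numpy.array([0.3, 2.7])}
# number densities in atoms/bn-cm, like the numberDensities argument above
densities = {"U235": 0.002, "U238": 0.020}

macroGroupConstants = numpy.zeros(2)
for nuclideName, numberDensity in sorted(densities.items()):  # sorted, as above
    macroGroupConstants += numberDensity * micros[nuclideName]
# macroGroupConstants now holds the group-wise macroscopic constants (1/cm)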
Example #24
    def makeAssemWithUniformMesh(sourceAssem, newMesh):
        """
        Build new assembly based on a source assembly but apply the uniform mesh.

        The new assemblies must have appropriately mapped number densities as
        input for a neutronics solve. They must also have other relevant
        state parameters for follow-on steps. Thus, this maps many parameters
        from the ARMI mesh to the uniform mesh.

        See Also
        --------
        applyStateToOriginal : basically the reverse on the way out.
        """
        newAssem = UniformMeshGeometryConverter._createNewAssembly(sourceAssem)
        runLog.debug(f"Creating a uniform mesh of {newAssem}")
        bottom = 0.0
        for topMeshPoint in newMesh:
            runLog.debug(
                f"From axial elevation {bottom:<6.2f} cm to {topMeshPoint:<6.2f} cm"
            )
            overlappingBlockInfo = sourceAssem.getBlocksBetweenElevations(
                bottom, topMeshPoint
            )
            # This is not expected to occur given that the assembly mesh is consistent with
            # the blocks within it, but this is added for defensive programming and to
            # highlight a developer issue.
            if not overlappingBlockInfo:
                raise ValueError(
                    f"No blocks found between {bottom:.3f} and {topMeshPoint:.3f} in {sourceAssem}. "
                    f"This is a major bug that should be reported to the developers."
                )

            # Iterate over the blocks that are within this region and
            # select one as a "source" for determining which cross section
            # type to use. This uses the following rules:
            #     1. Select the first block that has either FUEL or CONTROL flags
            #     2. Fail if multiple blocks meet this criteria if they have different XS types
            #     3. Default to the first block in the list if no blocks meet FUEL or CONTROL flags criteria.
            blocks = [b for b, _h in overlappingBlockInfo]
            sourceBlock = None
            xsType = None
            for b in blocks:
                if b.hasFlags([Flags.FUEL, Flags.CONTROL]):
                    if sourceBlock is None:
                        sourceBlock = b
                        xsType = b.p.xsType
                    else:
                        # If there is a duplicate source block candidate that has a different
                        # cross section type then this is an error because the code cannot
                        # decide which one is correct.
                        if b.p.xsType != xsType:
                            msg = (
                                f"{sourceBlock} and {b} in {newAssem} have conflicting XS types and are "
                                f"candidates for the source block. To fix this, either set their XS types "
                                f"to be the same or remove these flags {[Flags.FUEL, Flags.CONTROL]} "
                                f"from one of the blocks."
                            )
                            runLog.error(msg)
                            raise ValueError(msg)

            # If no blocks meet the criteria above just select the first block
            # as the source block and use its cross section type.
            if sourceBlock is None:
                sourceBlock = blocks[0]
                xsType = blocks[0].p.xsType

            runLog.debug(
                f"  - The source block for this region is {sourceBlock} with XS type {xsType}"
            )

            # Report the homogenization fractions for debugging purposes
            for b, heightOverlap in overlappingBlockInfo:
                totalHeight = topMeshPoint - bottom
                runLog.debug(
                    f"  - {b} accounts for {heightOverlap/totalHeight * 100.0:<5.2f}% of the homogenized region"
                )

            block = copy.deepcopy(sourceBlock)
            block.p.xsType = xsType
            block.setHeight(topMeshPoint - bottom)
            block.p.axMesh = 1
            _setNumberDensitiesFromOverlaps(block, overlappingBlockInfo)
            newAssem.add(block)
            bottom = topMeshPoint

        newAssem.reestablishBlockOrder()
        newAssem.calculateZCoords()
        return newAssem
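
The density mapping done by _setNumberDensitiesFromOverlaps (not shown here) amounts to height-weighted averaging of the overlapping blocks' number densities. A simplified sketch of that idea with plain dicts (the helper and its inputs are hypothetical):

def homogenizeNumberDensities(overlaps, regionHeight):
    """Hypothetical helper: blend densities by fractional height overlap."""
    homogenized = {}
    for densities, heightOverlap in overlaps:
        weight = heightOverlap / regionHeight
        for nuc, density in densities.items():
            homogenized[nuc] = homogenized.get(nuc, 0.0) + weight * density
    return homogenized


# two source blocks, each covering half of a 10 cm uniform-mesh region
blended = homogenizeNumberDensities(
    [({"U235": 0.002}, 5.0), ({"U235": 0.004}, 5.0)], 10.0
)
assert abs(blended["U235"] - 0.003) < 1e-12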
Example #25
def recursivelyLoadSettingsFiles(
    rootDir,
    patterns: List[str],
    recursive=True,
    ignorePatterns: List[str] = None,
    handleInvalids=True,
):
    """
    Scans a path for valid settings files and returns loaded Settings objects.

    Parameters
    ----------
    rootDir : str
        The base path to scan for settings files
    patterns : list
        file patterns to match file names
    recursive : bool (optional)
        load files recursively
    ignorePatterns : list (optional)
        list of filename patterns to ignore
    handleInvalids : bool
        option to suppress errors generated when finding files that appear to be settings files but fail to load. This
        may happen when old settings are present.

    Returns
    -------
    csFiles : list
        list of :py:class:`~armi.settings.caseSettings.Settings` objects.
    """
    assert not isinstance(
        ignorePatterns,
        str), "Bare string passed as ignorePatterns. Make sure to pass a list"

    assert not isinstance(
        patterns,
        str), "Bare string passed as patterns. Make sure to pass a list"

    possibleSettings = []
    runLog.info(
        "Finding potential settings files matching {}.".format(patterns))
    if recursive:
        for directory, _list, files in os.walk(rootDir):
            matches = set()
            for pattern in patterns:
                matches |= set(fnmatch.filter(files, pattern))
            if ignorePatterns is not None:
                for ignorePattern in ignorePatterns:
                    matches -= set(fnmatch.filter(files, ignorePattern))
            possibleSettings.extend(
                [os.path.join(directory, fname) for fname in matches])
    else:
        for pattern in patterns:
            # root the non-recursive search at rootDir rather than the current directory
            possibleSettings.extend(glob.glob(os.path.join(rootDir, pattern)))

    csFiles = []
    runLog.info("Checking for valid settings files.")
    for possibleSettingsFile in possibleSettings:
        if os.path.getsize(possibleSettingsFile) > 1e6:
            runLog.info(
                "skipping {} -- looks too big".format(possibleSettingsFile))
            continue
        try:
            cs = Settings()
            cs.loadFromInputFile(possibleSettingsFile,
                                 handleInvalids=handleInvalids)
            csFiles.append(cs)
            runLog.extra("loaded {}".format(possibleSettingsFile))
        except exceptions.InvalidSettingsFileError as ee:
            runLog.info("skipping {}\n    {}".format(possibleSettingsFile, ee))
        except yaml.composer.ComposerError as ee:
            runLog.info(
                "skipping {}; it appears to be an incomplete YAML snippet\n    {}"
                .format(possibleSettingsFile, ee))
        except Exception as ee:
            runLog.error(
                "Failed to parse {}.\nIt looked like a settings file but gave this exception:\n{}: {}"
                .format(possibleSettingsFile,
                        type(ee).__name__, ee))
            raise
    csFiles.sort(key=lambda csFile: csFile.caseTitle)
    return csFiles
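
The include/exclude logic above is plain fnmatch set algebra; a tiny standalone demonstration of the same filtering:

import fnmatch

files = ["case1.yaml", "case2.yaml", "case1-old.yaml", "notes.txt"]
patterns = ["*.yaml"]
ignorePatterns = ["*-old.yaml"]

matches = set()
for pattern in patterns:
    matches |= set(fnmatch.filter(files, pattern))
for ignorePattern in ignorePatterns:
    matches -= set(fnmatch.filter(files, ignorePattern))

assert matches == {"case1.yaml", "case2.yaml"}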
Example #26
    def workerOperate(self):
        """
        The main loop on any worker MPI nodes.

        Notes
        -----
        This method is what worker nodes are in while they wait for instructions from
        the master node in a parallel run. The nodes will sit, waiting for a "worker
        command". When this comes (from a bcast from the master), a set of if statements
        are evaluated, with specific behaviors defined for each command. If the operator
        doesn't understand the command, it loops through the interface stack to see if
        any of the interfaces understand it.

        Originally, "magic strings" were broadcast, which were handled either here or in
        one of the interfaces' ``workerOperate`` methods. Since then, the
        :py:mod:`~armi.mpiActions` system has been devised which just broadcasts
        ``MpiAction`` objects. Both methods are still supported.

        See Also
        --------
        armi.mpiActions : MpiAction information
        armi.interfaces.workerOperate : interface-level handling of worker commands.

        """
        while True:
            # sit around waiting for a command from the master
            runLog.extra("Node {0} ready and waiting".format(armi.MPI_RANK))
            cmd = armi.MPI_COMM.bcast(None, root=0)
            runLog.extra("worker received command {0}".format(cmd))
            # got a command. go use it.
            if isinstance(cmd, mpiActions.MpiAction):
                cmd.invoke(self, self.r, self.cs)
            elif cmd == "quit":
                self.workerQuit()
                break  # If this break is removed, the program will remain in the while loop forever.
            elif cmd == "finished":
                runLog.warning(
                    "Received unexpected FINISHED command. Usually a QUIT command precedes this. "
                    "Skipping cleanup of temporary files.")
                break
            elif cmd == "sync":
                # wait around for a sync
                runLog.debug("Worker syncing")
                note = armi.MPI_COMM.bcast("wait", root=0)
                if note != "wait":
                    raise RuntimeError(
                        'did not get "wait". Got {0}'.format(note))
            else:
                # we don't understand the command on our own. check the interfaces
                # this allows all interfaces to have their own custom operation code.
                handled = False
                for i in self.interfaces:
                    handled = i.workerOperate(cmd)
                    if handled:
                        break
                if not handled:
                    if armi.MPI_RANK == 0:
                        print("Interfaces" + str(self.interfaces))
                    runLog.error(
                        "No interface understood worker command {0}\n check stdout for err\n"
                        "available interfaces:\n  {1}".format(
                            cmd,
                            "\n  ".join("name:{} typeName:{} {}".format(
                                i.name, i.function, i)
                                        for i in self.interfaces),
                        ))
                    raise RuntimeError(
                        "Failed to delegate worker command {} to an interface."
                        .format(cmd))

            if self._workersShouldResetAfter(cmd):
                # clear out the reactor on the workers to start anew.
                # Note: This should build empty non-core systems too.
                xsGroups = self.getInterface("xsGroups")
                if xsGroups:
                    xsGroups.clearRepresentativeBlocks()
                cs = settings.getMasterCs()
                bp = self.r.blueprints
                spatialGrid = self.r.core.spatialGrid
                self.detach()
                self.r = reactors.Reactor(cs, bp)
                core = reactors.Core("Core", cs)
                self.r.add(core)
                core.spatialGrid = spatialGrid
                self.reattach(self.r, cs)

            # might be an mpi action which has a reactor and everything, preventing
            # garbage collection
            del cmd
            gc.collect()
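
The command loop is built on a simple broadcast handshake: the master bcasts a command object and every worker receives it inside a blocking bcast. A minimal mpi4py sketch of just that mechanic (an illustration to run under mpiexec, not ARMI's actual operator):

# hypothetical demo: mpiexec -n 4 python worker_demo.py
from mpi4py import MPI

comm = MPI.COMM_WORLD

if comm.Get_rank() == 0:
    # master: broadcast commands; workers block in bcast until one arrives
    for cmd in ["sync", "quit"]:
        comm.bcast(cmd, root=0)
else:
    while True:
        cmd = comm.bcast(None, root=0)  # receive the master's command
        if cmd == "quit":
            break  # analogous to workerQuit(): leave the service loop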
Example #27
        table = []  # tuples (path, title, canStart, hasIssues)
        for case in suite:
            hasIssues = "UNKNOWN"
            if not self.args.skip_checks:
                hasIssues = "PASSED" if case.checkInputs() else "HAS ISSUES"
            try:
                if self.args.generate_design_summary:
                    case.summarizeDesign(
                        self.args.full_core_map, not self.args.disable_block_axial_mesh
                    )
                    canStart = "PASSED"
                else:
                    canStart = "UNKNOWN"
            except Exception as ee:
                runLog.error("Failed to initialize/summarize {}".format(case))
                runLog.error(traceback.format_exc())
                canStart = "FAILED"

            table.append((case.cs.path, case.title, canStart, hasIssues))

        runLog.important(
            tabulate.tabulate(
                table,
                headers=["case", "can start", "input is self consistent"],
                tablefmt="armi",
            )
        )

        if any(t[2] != "PASSED" or t[3] != "PASSED" for t in table):
            sys.exit(-1)
Example #28
    def run(self):
        """
        Run each case, one after the other.

        .. warning:: Suite running may not work yet if the cases have interdependencies.
                     We typically run on an HPC but are still working on a platform
                     independent way of handling HPCs.

        """
        for ci, case in enumerate(self):
            runLog.important(f"Running case {ci+1}/{len(self)}: {case}")
            with directoryChangers.DirectoryChanger(case.directory):
                settings.setMasterCs(case.cs)
                try:
                    case.run()
                except:  # pylint: disable=bare-except; allow it at this level to run all cases
                    # allow all errors and continue to next run
                    runLog.error(f"{case} failed during execution.")
                    traceback.print_exc()

    def compare(
        self,
        that,
        exclusion: Optional[Sequence[str]] = None,
        weights=None,
        tolerance=0.01,
        timestepMatchup=None,
    ) -> int:
        """
        Compare one case suite with another.

        Returns
        -------
Example #29
    def _createParamDatasets(  # pylint: disable=too-many-branches
        self, timeStepName, armiObjects
    ):
        """Qualified name of group1/group2/.../groupN/dataset and data dictionary to be
        stored

        Stores each dictionary key as its own dataset corresponding to an array of
        values

        All None's are stripped out and represented by a positional marking attribute on
        the generated dataset.  This is because None's are an incompatible python object
        with HDF's more primitive datatypes and this was deemed the most elegant
        solution.

        If the input structures contain lists or ndarrays they're treated as if they
        could contain jagged lengths which is similarly incompatible as it casts the
        dtype of the stored array to the impossible "object" dtype.  The jagged arrays
        are padded out and the real length stored in the marking attribute.

        To maintain fidelity with input data, any generated datasets should be read out
        by the accompanying reader method ``_get_1d_dataset``
        """
        compType = type(armiObjects[0])

        paramDefs = armiObjects[0].p.paramDefs.toWriteToDB(parameters.SINCE_ANYTHING)
        for paramDef in paramDefs:
            datasetName = r"{}/{}".format(timeStepName, paramDef.name)

            try:
                paramName = paramDef.name  # quick lookup
                values = []
                hasValue = numpy.repeat(True, len(armiObjects))

                for ic, armiObject in enumerate(armiObjects):
                    val = armiObject.p.get(paramName, paramDef.default)
                    if val is None or val is parameters.NoDefault:
                        hasValue[ic] = False
                    else:
                        values.append(val)

                jagged = False
                if values and isinstance(values[0], (list, numpy.ndarray)):
                    lengths = [len(v) for v in values]
                    jagged = any(l != lengths[0] for l in lengths)
                    if jagged:
                        values = [list(v) for v in values]
                        padTo = max(lengths)
                        dummyValue = next(
                            iter(v[0] for l, v in zip(lengths, values) if l != 0)
                        )
                        # use of a dummyValue b/c some lists in need of padding may not have their own element
                        # to pad with
                        for _l, _v in zip(lengths, values):
                            _v.extend([dummyValue] * (padTo - _l))
                    convertedValues = []
                    for valueItem in values:
                        convertedValueItem = numpy.array(valueItem)
                        if convertedValueItem.dtype.kind == "U":
                            # hdf5 can't handle unicode arrays. Convert to bytes
                            convertedValueItem = convertedValueItem.astype("S")
                        convertedValues.append(convertedValueItem)
                    values = convertedValues
                else:
                    # handle values that are just unicode (such as xsType)
                    values = numpy.array(values)
                    if values.dtype.kind == "U":
                        # hdf5 can't handle unicode arrays. Convert to bytes
                        values = values.astype("S")

                try:
                    self._hdf_file.create_dataset(
                        datasetName, data=values, compression="gzip"
                    )
                except RuntimeError:
                    # HDF5 does not delete from disk
                    del self._hdf_file[datasetName]
                    self._hdf_file.create_dataset(
                        datasetName, data=values, compression="gzip"
                    )

                paramDef.assigned &= ~parameters.SINCE_LAST_DB_TRANSMISSION

                if not all(hasValue):
                    self._hdf_file[datasetName].attrs[self._NONE_ATTR] = hasValue

                if jagged:
                    self._hdf_file[datasetName].attrs[self._JAGGED_ATTR] = lengths

            except Exception as e:  # pylint: disable=broad-except, invalid-name
                if type(armiObjects[0]) is reactors.Core and paramName == "serialNum":
                    continue
                traceback.print_exc()  # print actual traceback and then add more info
                runLog.error("Caught exception: {}".format(e))
                raise ValueError(
                    "Cannot write to database for parameter '{}' ({}) with values: {}\n\n"
                    "Please ensure the data are well-formed and consistently typed.".format(
                        paramDef, datasetName, values
                    )
                )
Example #30
    def _create_1d_datasets(self, name, data):  # pylint: disable=too-many-branches
        """Qualified name of group1/group2/.../groupN/dataset and data dictionary to be stored

        Stores each dictionary key as its own dataset corresponding to an array of values

        All None's are stripped out and represented by a positional marking attribute on the generated dataset.
        This is because None's are an incompatible python object with HDF's more primitive datatypes and this
        was deemed the most elegant solution.

        If the input structures contain lists or ndarrays they're treated as if they could contain jagged lengths
        which is similarly incompatible as it casts the dtype of the stored array to the impossible "object" dtype.
        The jagged arrays are padded out and the real length stored in the marking attribute.

        To maintain fidelity with input data, any generated datasets
        should be read out by the accompanying reader method ``_get_1d_dataset``

        """
        for key, values in data.items():
            try:
                cur_name = r"{}/{}".format(name, key)

                nones = any(value is None for value in values)
                if nones:
                    hasValue = numpy.array([value is not None for value in values])
                    values = [value for value in values if value is not None]

                jagged = False
                if len(values) != 0 and isinstance(  # pylint: disable=len-as-condition
                    values[0], (list, numpy.ndarray)
                ):
                    # deal with list or array values
                    lengths = [len(v) for v in values]
                    jagged = any(length != lengths[0] for length in lengths)
                    if jagged:
                        values = [list(v) for v in values]
                        padTo = max(lengths)
                        dummyValue = next(
                            iter(v[0] for l, v in zip(lengths, values) if l != 0)
                        )
                        # use of a dummyValue b/c some lists in need of padding may not have their own element
                        # to pad with
                        for _l, _v in zip(lengths, values):
                            _v.extend([dummyValue] * (padTo - _l))

                    convertedValues = []
                    for valueItem in values:
                        convertedValueItem = numpy.array(valueItem)
                        if convertedValueItem.dtype.kind == "U":
                            # hdf5 can't handle unicode arrays. Convert to bytes
                            convertedValueItem = convertedValueItem.astype("S")
                        convertedValues.append(convertedValueItem)
                    values = convertedValues
                else:
                    # handle values that are just unicode (such as xsType)
                    values = numpy.array(values)
                    if values.dtype.kind == "U":
                        # hdf5 can't handle unicode arrays. Convert to bytes
                        values = values.astype("S")

                try:
                    self._hdf_file.create_dataset(
                        cur_name, data=values, compression="gzip"
                    )
                except RuntimeError:
                    # this can happen when updating an existing dataset.
                    del self._hdf_file[cur_name]
                    self._hdf_file.create_dataset(
                        cur_name, data=values, compression="gzip"
                    )
                except TypeError:
                    runLog.error(
                        "Failed to coerce data for {} into HDF5 dataset.".format(
                            cur_name
                        )
                    )
                    raise

                if nones:
                    self._hdf_file[cur_name].attrs[self._NONE_ATTR] = hasValue
                if jagged:
                    self._hdf_file[cur_name].attrs[self._JAGGED_ATTR] = lengths
            except Exception:  # pylint: disable=broad-except
                traceback.print_exc()  # print actual traceback and then add more info
                if hasattr(values, "dtype"):
                    tp = values.dtype
                else:
                    tp = type(values)
                raise ValueError(
                    "Cannot write to database for parameter '{}' with values: {}\n\n"
                    "Please ensure the data are well-formed and consistently typed (they are {})..".format(
                        key, values, tp
                    )
                )
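
The jagged-array workaround described in both docstrings (pad the rows to a rectangle, then record the true lengths in a marking attribute) can be demonstrated in isolation with h5py; the file name and attribute key below are illustrative:

import h5py
import numpy

values = [[1, 2, 3], [4], [5, 6]]  # jagged: would force the "object" dtype
lengths = [len(v) for v in values]
padTo = max(lengths)
# pad with a shared dummy value, as the dummyValue trick above does
dummyValue = next(iter(v[0] for v in values if len(v) != 0))
padded = [v + [dummyValue] * (padTo - len(v)) for v in values]

with h5py.File("jaggedDemo.h5", "w") as f:
    dset = f.create_dataset("demo/jagged", data=numpy.array(padded), compression="gzip")
    dset.attrs["jaggedLengths"] = lengths  # real lengths, for the reader to unpad

with h5py.File("jaggedDemo.h5", "r") as f:
    dset = f["demo/jagged"]
    restored = [
        row[:n].tolist() for row, n in zip(dset[:], dset.attrs["jaggedLengths"])
    ]
assert restored == values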