Example #1
    def validParams():
        """
        `TestCase` objects are not designed for external use; the `validParams` are only set when
        the objects are constructed, which occurs within the `moosetest.run` function. As such, only
        add parameters that you expect that function to set, since there is no configuration-file
        based way of altering them.
        """
        params = MooseObject.validParams()
        params.add('runner',
                   vtype=Runner,
                   required=True,
                   mutable=False,
                   doc="The `Runner` object to execute.")
        params.add('controllers',
                   vtype=Controller,
                   array=True,
                   mutable=False,
                   doc="`Controller` object(s) that dictate if the Runner should run.")
        params.add('min_fail_state',
                   vtype=TestCase.Result,
                   mutable=False,
                   default=TestCase.Result.TIMEOUT,
                   doc="The minimum state considered a failure for the entire test case.")
        return params
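
A hedged construction sketch: per the docstring above, `TestCase` objects are created inside `moosetest.run`, so building one directly, as below, is purely illustrative (`MyRunner` is a hypothetical `Runner` subclass).

    # Illustrative only: `moosetest.run` normally constructs TestCase objects.
    # `MyRunner` is a hypothetical Runner subclass.
    runner = MyRunner(name='tests/my_test')
    case = TestCase(runner=runner,
                    controllers=None,
                    min_fail_state=TestCase.Result.TIMEOUT)
    print(case.getParam('min_fail_state'))  # -> TestCase.Result.TIMEOUT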
Example #2
    def validParams():
        params = MooseObject.validParams()
        params.addParam('average_load', 64.0, "Average load to allow")
        params.addParam('max_processes', None, "Hard limit of maximum processes to use")
        params.addParam(
            'min_reported_time', 10,
            "The minimum time elapsed before a job is reported as taking too long to run.")
        return params
Example #3
    def testInitAndName(self):
        obj = MooseObject()
        self.assertFalse(obj.isParamValid("name"))
        self.assertEqual(obj.name(), None)

        obj = MooseObject(name='Andrew')
        self.assertEqual(obj.name(), 'Andrew')
Example #4
    def validParams():
        params = MooseObject.validParams()
        params.add('progress_interval',
                   default=10.,
                   vtype=(int, float),
                   mutable=False,
                   doc="Number of seconds between progress updates for a test case.")
        return params
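
A minimal usage sketch, assuming the class defining these parameters is a formatter-style object (`MyFormatter` is a hypothetical name): `vtype=(int, float)` means either numeric type is accepted.

    fmt = MyFormatter(progress_interval=5)    # int is accepted
    fmt = MyFormatter(progress_interval=2.5)  # float is accepted
    print(fmt.getParam('progress_interval'))  # -> 2.5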
Example #5
    def __init__(self, *args, **kwargs):
        MooseObject.__init__(self, *args, **kwargs)

        self._parameters.set('prereq', self.getParam('prereq').split())
        self._parameters.set('cli_args', self.getParam('cli_args').split())

        self.specs = self._parameters
        self.outfile = None
        self.errfile = None
        self.joined_out = ''
        self.exit_code = 0
        self.process = None
        self.tags = self.specs['tags']
        self.__caveats = set([])

        # Alternate text we want to print as part of our status instead of the
        # pre-formatted status text (SYNTAX PASS instead of OK for example)
        self.__tester_message = ''

        # Bool if test can run
        self._runnable = None

        # Set up common parameters
        self.should_execute = self.specs['should_execute']
        self.check_input = self.specs['check_input']

        if self.specs["allow_test_objects"]:
            self.specs["cli_args"].append("--allow-test-objects")

        ### Enumerate the tester statuses we want to use
        self.test_status = StatusSystem()
        self.no_status = self.test_status.no_status
        self.queued = self.test_status.queued
        self.skip = self.test_status.skip
        self.silent = self.test_status.silent
        self.success = self.test_status.success
        self.fail = self.test_status.fail
        self.diff = self.test_status.diff
        self.deleted = self.test_status.deleted

        self.__failed_statuses = [self.fail, self.diff, self.deleted]
        self.__skipped_statuses = [self.skip, self.silent]
Example #6
    def validParams():
        params = MooseObject.validParams()
        params.add('par')
        params.add('par_int', vtype=int)
        params.add('par_float', vtype=float)
        params.add('par_str', vtype=str)
        params.add('par_bool', vtype=bool)
        params.add('vec_int', vtype=int, array=True)
        params.add('vec_float', vtype=float, array=True)
        params.add('vec_str', vtype=str, array=True)
        params.add('vec_bool', vtype=bool, array=True)
        return params
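
A hedged round trip, assuming a `TestObject` class (name hypothetical) whose `validParams` is the function above: scalar entries take a single value of the declared `vtype`, while `array=True` entries expect a sequence of that type.

    obj = TestObject(par_int=1980, par_str='duke', vec_float=(1.0, 2.0, 3.0))
    print(obj.getParam('par_int'))    # -> 1980
    print(obj.getParam('vec_float'))  # -> (1.0, 2.0, 3.0)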
Example #7
    def __init__(self, *args, **kwargs):
        MooseObject.__init__(self, *args, **kwargs)

        self._runner = self.getParam('runner')
        self._differs = self._runner.getParam('differs') or tuple()
        self._controllers = self.getParam('controllers') or tuple()
        self._min_fail_state = self.getParam('min_fail_state')
        self.parameters().setValue('name', self._runner.name())

        self.__results = None  # results from the Runner/Differ objects
        self.__progress = None  # execution progress of this TestCase
        self.__state = None  # the overall state (TestCase.Result)

        # The following are various time settings managed via the `setProgress` method
        self.__create_time = None  # time when the object was created
        self.__start_time = None  # time when progress changed to running
        self.__execute_time = None  # duration of execution from running to finished

        self.__unique_id = uuid.uuid4()  # a unique identifier for this instance

        self.setProgress(TestCase.Progress.WAITING)
        TestCase.__TOTAL__ += 1
Example #8
    def validParams():
        params = MooseObject.validParams()
        raise Exception('validParams failed')
        return params
Example #9
    def __init__(self, *args, **kwargs):
        MooseObject.__init__(self, *args, **kwargs)
        raise Exception('__init__ failed')
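
Examples #8 and #9 are deliberate-failure fixtures: the raised exceptions let unit tests verify how errors during parameter creation and construction are reported. A hedged sketch in the unittest style used elsewhere in these examples (the fixture class names are assumed):

    def testBrokenObjects(self):
        with self.assertRaises(Exception) as e:
            BadParamsObject.validParams()  # Example #8 fixture
        self.assertEqual('validParams failed', str(e.exception))

        with self.assertRaises(Exception) as e:
            BadInitObject()  # Example #9 fixture
        self.assertEqual('__init__ failed', str(e.exception))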
Example #10
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('name', self.__class__.__name__)
        MooseObject.__init__(self, *args, **kwargs)
        self.__progress_time = dict()
        self.__progress_interval = self.getParam('progress_interval')
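
Because `__init__` calls `kwargs.setdefault('name', self.__class__.__name__)`, the object's name falls back to its class name. A minimal sketch, assuming the class above is named `Formatter`:

    class MyFormatter(Formatter):
        pass

    fmt = MyFormatter()
    print(fmt.name())  # -> 'MyFormatter'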
Example #11
    def testIsParamValid(self):
        obj = MooseObject()
        self.assertFalse(obj.isParamValid('name'))

        obj = MooseObject(name='Andrew')
        self.assertTrue(obj.isParamValid('name'))
Example #12
    def testLogs(self):
        msg = "This is a test: {}"

        obj = MooseObject(log_level='DEBUG')
        with self.assertLogs(level='DEBUG') as log:
            obj.debug(msg, 'DEBUG')
        self.assertEqual(len(log.output), 1)
        self.assertIn(msg.format('DEBUG'), log.output[0])

        obj = MooseObject()
        with self.assertLogs(level='INFO') as log:
            obj.info(msg, 'INFO')
        self.assertEqual(len(log.output), 1)
        self.assertIn(msg.format('INFO'), log.output[0])

        with self.assertLogs(level='WARNING') as log:
            obj.warning(msg, 'WARNING')
        self.assertEqual(len(log.output), 1)
        self.assertIn(msg.format('WARNING'), log.output[0])

        with self.assertLogs(level='ERROR') as log:
            obj.error(msg, 'ERROR')
        self.assertEqual(len(log.output), 1)
        self.assertIn(msg.format('ERROR'), log.output[0])

        with self.assertLogs(level='CRITICAL') as log:
            obj.critical(msg, 'CRITICAL')
        self.assertEqual(len(log.output), 1)
        self.assertIn(msg.format('CRITICAL'), log.output[0])

        with self.assertLogs(level='CRITICAL') as log:
            obj.critical(msg, 'CRITICAL', stack_info=True)
        self.assertEqual(len(log.output), 1)
        self.assertIn('Stack (most recent call last):', log.output[0])

        with self.assertLogs(level='CRITICAL') as log:
            try:
                raise MooseException("You messed up!")
            except MooseException:
                obj.exception(msg, 'CRITICAL')

        self.assertEqual(len(log.output), 1)
        self.assertIn(msg.format('CRITICAL'), log.output[0])

        with self.assertRaises(AssertionError) as e:
            obj.exception('You called exception wrong')
        self.assertEqual(
            "No Exception raised, see `MooseObject.exception` for help.",
            str(e.exception))

        with self.assertRaises(AssertionError) as e:
            obj.info(42)
        self.assertEqual(
            "The supplied 'message' must be a python `str` type, see `MooseObject.log`.",
            str(e.exception))
Example #13
    def validParams():
        params = MooseObject.validParams()
        params.add("year")
        return params
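
A hedged round trip, assuming a `TestObject` class (name hypothetical) using the `validParams` above; `'year'` was added without a `vtype`, so any value is accepted:

    obj = TestObject(year=1980)
    print(obj.isParamValid('year'))  # -> True
    print(obj.getParam('year'))      # -> 1980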
Example #14
    def testResetAndStatus(self):
        obj = MooseObject(
            _error_mode=parameters.InputParameters.ErrorMode.CRITICAL)
        self.assertEqual(obj.status(), 0)
        with self.assertLogs(level='ERROR'):
            obj.getParam('wrong')
        self.assertEqual(obj.status(), 1)
        obj.reset()
        self.assertEqual(obj.status(), 0)

        with self.assertLogs(level='ERROR') as log:
            obj.reset('WRONG')
        self.assertEqual(len(log.output), 1)
        self.assertIn("Attempting to reset logging count for 'WRONG'",
                      log.output[0])
Example #15
    def testGetParam(self):
        obj = MooseObject()
        self.assertEqual(obj.getParam('name'), None)

        obj = MooseObject(name='Andrew')
        self.assertEqual(obj.getParam('name'), 'Andrew')

        with self.assertRaises(MooseException) as me:
            obj.getParam('wrong')
        self.assertEqual("The parameter 'wrong' does not exist.",
                         me.exception.message)

        with self.assertLogs(level='WARNING') as log:
            obj = MooseObject(
                _error_mode=parameters.InputParameters.ErrorMode.WARNING)
            obj.getParam('wrong')
        self.assertEqual(len(log.output), 1)
        self.assertIn("The parameter 'wrong' does not exist.", log.output[0])

        with self.assertLogs(level='ERROR') as log:
            obj = MooseObject(
                _error_mode=parameters.InputParameters.ErrorMode.ERROR)
            obj.getParam('wrong')
        self.assertEqual(len(log.output), 1)
        self.assertIn("The parameter 'wrong' does not exist.", log.output[0])

        with self.assertLogs(level='CRITICAL') as log:
            obj = MooseObject(
                _error_mode=parameters.InputParameters.ErrorMode.CRITICAL)
            obj.getParam('wrong')
        self.assertEqual(len(log.output), 1)
        self.assertIn("The parameter 'wrong' does not exist.", log.output[0])
Example #16
    def testParameters(self):
        obj = MooseObject()
        self.assertIs(obj._parameters, obj.parameters())
Example #17
    def __init__(self, harness, *args, **kwargs):
        MooseObject.__init__(self, *args, **kwargs)

        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # The Scheduler class can be initialized with no "max_processes" argument and it'll default
        # to a soft limit. If, however, "max_processes" is passed, we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed the number of slots.
        if self.getParam('max_processes') is None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = self.getParam('max_processes')  # hard limit
            self.soft_limit = False

        self.average_load = self.getParam('average_load')

        self.min_report_time = self.getParam('min_reported_time')

        # Initialize run_pool based on available slots
        self.run_pool = ThreadPool(processes=self.available_slots)

        # Initialize status_pool to only use 1 process (to prevent status messages from getting clobbered)
        self.status_pool = ThreadPool(processes=1)

        # Slot lock when processing resource allocations and modifying slots_in_use
        self.slot_lock = threading.Lock()

        # Job lock when modifying a jobs status
        self.activity_lock = threading.Lock()

        # A combination of processors and threads (-j/-n) currently in use, as required by running jobs
        self.slots_in_use = 0

        # List of Lists containing all scheduled jobs
        self.__scheduled_jobs = []

        # Set containing jobs entering the run_pool
        self.__job_bank = set([])

        # Total running Job and Test failures encountered
        self.__failures = 0

        # Allow threads to set a global exception
        self.__error_state = False

        # Private set of jobs currently running
        self.__active_jobs = set([])

        # Jobs that are taking longer to finish than the allotted time are reported back early to
        # inform the user 'stuff' is still running. Jobs entering this set will not be reported again.
        self.jobs_reported = set([])

        # The last time the scheduler reported something
        self.last_reported_time = clock()

        # Sets of threading objects created by jobs entering and exiting the queues. When scheduler.waitFinish()
        # is called, and both thread pools are empty, the pools shut down, and the call to waitFinish() returns.
        self.__status_pool_lock = threading.Lock()
        self.__runner_pool_lock = threading.Lock()
        self.__status_pool_jobs = set([])
        self.__runner_pool_jobs = set([])

        # True when scheduler.waitFinish() is called. This alerts the scheduler that no more jobs
        # are to be scheduled. KeyboardInterrupts are then handled by the thread pools.
        self.__waiting = False
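
The soft/hard limit distinction above reduces to whether `available_slots` reflects a user-imposed cap. A self-contained sketch of the same pattern using only the standard library (the names are illustrative, not the Scheduler's API):

    from multiprocessing.pool import ThreadPool
    import threading

    def make_pools(max_processes=None):
        # No cap supplied: soft limit, a single job may exceed the slot count.
        if max_processes is None:
            available_slots, soft_limit = 1, True
        else:
            available_slots, soft_limit = max_processes, False  # hard limit
        run_pool = ThreadPool(processes=available_slots)
        status_pool = ThreadPool(processes=1)  # serialize status messages
        slot_lock = threading.Lock()           # guards slot bookkeeping
        return run_pool, status_pool, slot_lock, soft_limit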
Example #18
    def validParams():
        params = MooseObject.validParams()

        # Common Options
        #params.addRequiredParam('type', "The type of test of Tester to create for this test.")
        params.addParam(
            'max_time', int(os.getenv('MOOSE_TEST_MAX_TIME', 300)),
            "The maximum time in seconds that the test will be allowed to run.")
        params.addParam('skip', "Provide a reason this test will be skipped.")
        params.addParam(
            'deleted',
            "Tests that only show up when using the '-e' option (Permanently skipped or not implemented)."
        )
        params.addParam('unique_test_id', "The unique hash given to a test")

        params.addParam(
            'heavy', False,
            "Set to True if this test should only be run when the '--heavy' option is used."
        )
        params.add('group',
                   vtype=str,
                   array=True,
                   doc="A list of groups to which this test belongs.")
        params.addParam(
            'prereq', "",
            "A list of prereq tests that need to run successfully before launching this test. When 'prereq = ALL', TestHarness will run this test last. Multiple 'prereq = ALL' tests, or tests that depend on a 'prereq = ALL' test will result in cyclic errors. Naming a test 'ALL' when using 'prereq = ALL' will also result in an error."
        )
        params.addParam(
            'skip_checks', False,
            "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)"
        )
        params.addParam('scale_refine', 0,
                        "The number of refinements to do when scaling")
        params.addParam('success_message', 'OK', "The successful message")
        params.addParam(
            'redirect_output', False,
            "Redirect stdout to files. Necessary when expecting an error when using parallel options"
        )

        params.addParam('cli_args', "",
                        "Additional arguments to be passed to the test.")
        params.addParam(
            'allow_test_objects', False,
            "Allow the use of test objects by adding --allow-test-objects to the command line."
        )

        params.addParam(
            'valgrind', 'NONE',
            "Set to (NONE, NORMAL, HEAVY) to determine the configurations under which valgrind will run."
        )
        params.addParam('tags', [], "A list of strings")
        params.addParam(
            'max_buffer_size', None,
            "Bytes allowed in stdout/stderr before it is subjected to being trimmed. Set to -1 to ignore output size restrictions. "
            "If 'max_buffer_size' is not set, the default value of 'None' triggers a reasonable value (e.g. 100 kB)"
        )
        params.addParam(
            'parallel_scheduling', False,
            "Allow all tests in test spec file to run in parallel (adheres to prereq rules)."
        )

        # Test Filters
        params.addParam(
            'platform', ['ALL'],
            "A list of platforms on which this test will run. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')"
        )
        params.addParam(
            'compiler', ['ALL'],
            "A list of compilers for which this test is valid. ('ALL', 'GCC', 'INTEL', 'CLANG')"
        )
        params.add('petsc_version',
                   vtype=str,
                   array=True,
                   default=('ALL', ),
                   doc="A list of PETSc versions with which this test will run; "
                       "supports normal comparison operators ('<', '>', etc.)")
        params.addParam(
            'petsc_version_release', ['ALL'],
            "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')"
        )
        params.add('slepc_version',
                   vtype=str,
                   array=True,
                   default=('ALL', ),
                   doc="A list of SLEPc versions with which this test will run; "
                       "supports normal comparison operators ('<', '>', etc.)")
        params.addParam(
            'mesh_mode', ['ALL'],
            "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')"
        )
        params.addParam('min_ad_size', None,
                        "A minimum AD size for which this test will run")
        params.addParam(
            'ad_mode', ['ALL'],
            "A list of AD modes for which this test will run ('SPARSE', 'NONSPARSE')"
        )
        params.addParam(
            'ad_indexing_type', ['ALL'],
            "A list of AD indexing types for which this test will run ('LOCAL', 'GLOBAL')"
        )
        params.addParam(
            'method', ['ALL'],
            "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')"
        )
        params.addParam(
            'library_mode', ['ALL'],
            "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')"
        )
        params.addParam(
            'dtk', ['ALL'],
            "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam('unique_ids', ['ALL'],
                        "Deprecated. Use unique_id instead.")
        params.addParam('recover', True,
                        "A test that runs with '--recover' mode enabled")
        params.addParam(
            'vtk', ['ALL'],
            "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'tecplot', ['ALL'],
            "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.add('dof_id_bytes',
                   vtype=str,
                   array=True,
                   default=('ALL', ),
                   doc="A test that runs only if libmesh is configured "
                       "--with-dof-id-bytes = a specific number, e.g. '4', '8'")
        params.addParam(
            'petsc_debug', ['ALL'],
            "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs."
        )
        params.addParam(
            'curl', ['ALL'],
            "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'threading', ['ALL'],
            "A list of threading models this test runs with ('ALL', 'TBB', 'OPENMP', 'PTHREADS', 'NONE')"
        )
        params.addParam(
            'superlu', ['ALL'],
            "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'mumps', ['ALL'],
            "A test that runs only if MUMPS is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'strumpack', ['ALL'],
            "A test that runs only if STRUMPACK is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'chaco', ['ALL'],
            "A test that runs only if Chaco (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'parmetis', ['ALL'],
            "A test that runs only if Parmetis (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'party', ['ALL'],
            "A test that runs only if Party (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'ptscotch', ['ALL'],
            "A test that runs only if PTScotch (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'slepc', ['ALL'],
            "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'unique_id', ['ALL'],
            "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'cxx11', ['ALL'],
            "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'asio', ['ALL'],
            "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'fparser_jit', ['ALL'],
            "A test that runs only if FParser JIT is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'libpng', ['ALL'],
            "A test that runs only if libpng is available ('ALL', 'TRUE', 'FALSE')"
        )

        params.addParam(
            'depend_files', [],
            "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory)"
        )
        params.addParam(
            'env_vars', [],
            "A test that only runs if all the environment variables listed exist"
        )
        params.addParam(
            'should_execute', True,
            'Whether or not the executable needs to be run. Use this to chain together multiple tests based on one executable invocation'
        )
        params.addParam(
            'required_submodule', [],
            "A list of initialized submodules that this test requires.")
        params.addParam(
            'required_objects', [],
            "A list of required objects that are in the executable.")
        params.addParam(
            'required_applications', [],
            "A list of required registered applications that are in the executable."
        )
        params.addParam('check_input', False,
                        "Check for correct input file syntax")
        params.addParam(
            'display_required', False,
            "The test requires an active display for rendering (i.e., ImageDiff tests)."
        )
        params.addParam(
            'timing', True,
            "If True, the test will be allowed to run with the timing flag (i.e. Manually turning on performance logging)."
        )
        params.addParam(
            'boost', ['ALL'],
            "A test that runs only if BOOST is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'python', None,
            "Restrict the test to a specific version of python (e.g., 3.6 or 3.7.1)."
        )
        params.addParam(
            'required_python_packages', None,
            "Test will only run if the supplied python packages exist.")
        params.addParam(
            'requires', None,
            "A list of programs required for the test to operate, as tested with shutil.which."
        )
        params.addParam(
            "working_directory", None,
            "When set, TestHarness will enter this directory before running the test"
        )
        params.addParam(
            "moosetools", True,
            "When False the test will not run when using the moosetools version of the Tester."
        )

        # SQA
        params.addParam(
            "requirement", None,
            "The SQA requirement that this test satisfies (e.g., 'The Marker system shall provide means to mark elements for refinement within a box region.')"
        )
        params.addParam(
            "design", [],
            "The list of markdown files that contain the design(s) associated with this test (e.g., '/Markers/index.md /BoxMarker.md')."
        )
        params.addParam(
            "issues", [],
            "The list of github issues associated with this test (e.g., '#1234 #4321')"
        )
        params.addParam(
            "detail", None,
            "Details of SQA requirement for use within sub-blocks.")
        params.addParam("validation", False,
                        "Set to True to mark test as a validation problem.")
        params.addParam("verification", False,
                        "Set to True to mark test as a verification problem.")
        params.addParam(
            "deprecated", False,
            "When True the test is no longer considered part of the SQA process and as such does not require a requirement definition."
        )
        params.addParam(
            "collections", [],
            "A means for defining a collection of tests for SQA process.")
        params.addParam(
            "classification", 'functional',
            "A means for defining a requirement classification for SQA process."
        )
        return params
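
These parameters are normally populated from a test spec file by the TestHarness; inside a tester they are then read through the parameters object, as Example #5 does with `self.specs`. A hedged sketch (the `Tester` subclass is hypothetical):

    class MyTester(Tester):
        def __init__(self, *args, **kwargs):
            Tester.__init__(self, *args, **kwargs)
            # Parameters are read via the parameters object, as in Example #5.
            self.max_time = self.specs['max_time']  # seconds, defaults to 300
            self.is_heavy = self.specs['heavy']     # True -> only runs with '--heavy'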