Example 1
    def __init__(self, harness, params):
        MooseObject.__init__(self, harness, params)

        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # The Scheduler class can be initialized with no "max_processes" argument and it'll default
        # to a soft limit. If, however, a max_processes is passed, we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed the number of slots.
        if params['max_processes'] is None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = params['max_processes'] # hard limit
            self.soft_limit = False

        # Requested average load level to stay below
        self.average_load = params['average_load']

        # The time the status queue reported no activity to the TestHarness
        self.last_reported = clock()

        # A set containing jobs that have been reported
        self.jobs_reported = set([])

        # Initialize run_pool based on available slots
        self.run_pool = ThreadPool(processes=self.available_slots)

        # Initialize status_pool to only use 1 process (to prevent status messages from getting clobbered)
        self.status_pool = ThreadPool(processes=1)

        # Slot Lock when processing resource allocations
        self.slot_lock = threading.Lock()

        # DAG Lock when processing the DAG
        self.dag_lock = threading.Lock()

        # Queue Lock when processing thread pool states
        self.queue_lock = threading.Lock()

        # Job Count Lock when processing job_queue_count
        self.job_queue_lock = threading.Lock()

        # Workers in use (single job might request multiple slots)
        self.slots_in_use = 0

        # Jobs waiting to finish (includes actively running jobs)
        self.job_queue_count = 0

        # Set containing our Job containers. We use this in the event of a KeyboardInterrupt to
        # iterate over and kill any subprocesses
        self.tester_datas = set([])

        # Allow threads to set an exception state
        self.error_state = False
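The constructor above sets up the scheduler's concurrency model: either a soft limit (a default of one slot that a single job may exceed) or a hard limit taken from max_processes, plus two thread pools, one that runs jobs and a single-threaded one that keeps status output from interleaving. The standalone sketch below illustrates that same pattern under stated assumptions; MiniScheduler and its methods are hypothetical names, not part of the TestHarness, and only the standard multiprocessing.pool.ThreadPool and threading.Lock are used.

# Minimal sketch of the run-pool / status-pool pattern (hypothetical, for illustration only).
import threading
from multiprocessing.pool import ThreadPool

class MiniScheduler(object):
    def __init__(self, max_processes=None):
        # No max_processes: soft limit, one slot by default, a single job may exceed it.
        # Explicit max_processes: hard limit on the number of available slots.
        if max_processes is None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = max_processes
            self.soft_limit = False

        self.run_pool = ThreadPool(processes=self.available_slots)
        self.status_pool = ThreadPool(processes=1)  # one worker, so status lines never clobber each other
        self.slot_lock = threading.Lock()
        self.slots_in_use = 0

    def schedule(self, func, slots=1):
        # Account for the requested slots, run the job, then report on the status pool.
        with self.slot_lock:
            self.slots_in_use += slots
        self.run_pool.apply_async(func, callback=lambda result: self._finished(func, result, slots))

    def _finished(self, func, result, slots):
        with self.slot_lock:
            self.slots_in_use -= slots
        self.status_pool.apply_async(self._print_status, (func.__name__, result))

    def _print_status(self, name, result):
        print('%s finished with %s' % (name, result))

    def close(self):
        for pool in (self.run_pool, self.status_pool):
            pool.close()
            pool.join()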
Example 2
    def validParams():
        params = MooseObject.validParams()
        params.addRequiredParam('average_load',  64.0, "Average load to allow")
        params.addRequiredParam('max_processes', None, "Hard limit of maximum processes to use")
        params.addParam('min_reported_time', 10, "The minimum time elapsed before a job is reported as taking too long to run.")

        return params
Example 3
    def validParams():
        params = MooseObject.validParams()
        params.addRequiredParam('average_load', 64.0, "Average load to allow")
        params.addRequiredParam('max_processes', None,
                                "Hard limit of maxium processes to use")

        return params
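The validParams() pattern shown here and in the later examples builds on a parameter container provided by the MOOSE Python utilities whose implementation is not included in these snippets. As a rough sketch only, the stand-in below assumes a dict-backed container (the real InputParameters class may differ) so that the (name, default, doc) triples and the required/optional distinction are concrete.

# Hypothetical stand-in for the parameter container returned by MooseObject.validParams()
# (illustration only; the real InputParameters API may differ).
class MiniParams(object):
    def __init__(self):
        self._values = {}
        self._docs = {}
        self._required = set()

    def addRequiredParam(self, name, default=None, doc=""):
        self._required.add(name)
        self.addParam(name, default, doc)

    def addParam(self, name, default=None, doc=""):
        # Support the two-argument form, e.g. addParam('skip', "doc string"), which carries no default.
        if doc == "" and isinstance(default, str):
            default, doc = None, default
        self._values[name] = default
        self._docs[name] = doc

    def __getitem__(self, name):
        return self._values[name]

    def __setitem__(self, name, value):
        self._values[name] = value


params = MiniParams()
params.addRequiredParam('average_load', 64.0, "Average load to allow")
params.addRequiredParam('max_processes', None, "Hard limit of maximum processes to use")
print(params['average_load'])   # 64.0
print(params['max_processes'])  # None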
Example 4
    def validParams():
        params = MooseObject.validParams()

        # Common Options
        params.addRequiredParam('type', "The type of Tester to create for this test.")
        params.addParam('max_time',   300, "The maximum time in seconds that the test will be allowed to run.")
        params.addParam('min_reported_time', 10, "The minimum time elapsed before a test is reported as taking too long to run.")
        params.addParam('skip',     "Provide a reason this test will be skipped.")
        params.addParam('deleted',         "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")

        params.addParam('heavy',    False, "Set to True if this test should only be run when the '--heavy' option is used.")
        params.addParam('group',       [], "A list of groups to which this test belongs.")
        params.addParam('prereq',      [], "A list of prereq tests that need to run successfully before launching this test.")
        params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")
        params.addParam('scale_refine',    0, "The number of refinements to do when scaling")
        params.addParam('success_message', 'OK', "The message to display on success")

        params.addParam('cli_args',       [], "Additional arguments to be passed to the test.")
        params.addParam('allow_test_objects', False, "Allow the use of test objects by adding --allow-test-objects to the command line.")

        params.addParam('valgrind', 'NONE', "Set to (NONE, NORMAL, HEAVY) to determine the configurations under which valgrind will run.")

        # Test Filters
        params.addParam('platform',      ['ALL'], "A list of platforms on which this test will run. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
        params.addParam('compiler',      ['ALL'], "A list of compilers for which this test is valid. ('ALL', 'GCC', 'INTEL', 'CLANG')")
        params.addParam('petsc_version', ['ALL'], "A list of PETSc versions with which this test will run; supports normal comparison operators ('<', '>', etc...)")
        params.addParam('petsc_version_release', ['ALL'], "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')")
        params.addParam('slepc_version', [], "A list of SLEPc versions with which this test will run; supports normal comparison operators ('<', '>', etc...)")
        params.addParam('mesh_mode',     ['ALL'], "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')")
        params.addParam('method',        ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
        params.addParam('library_mode',  ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
        params.addParam('dtk',           ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_ids',    ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
        params.addParam('recover',       True,    "A test that runs with '--recover' mode enabled")
        params.addParam('vtk',           ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('tecplot',       ['ALL'], "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('dof_id_bytes',  ['ALL'], "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'")
        params.addParam('petsc_debug',   ['ALL'], "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs.")
        params.addParam('curl',          ['ALL'], "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('tbb',           ['ALL'], "A test that runs only if TBB is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('superlu',       ['ALL'], "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('slepc',         ['ALL'], "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_id',     ['ALL'], "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')")
        params.addParam('cxx11',         ['ALL'], "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('asio',          ['ALL'], "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('depend_files',  [], "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory)")
        params.addParam('env_vars',      [], "A test that only runs if all the environment variables listed exist")
        params.addParam('should_execute', True, 'Whether or not the executable needs to be run. Use this to chain together multiple tests based off of one executable invocation')
        params.addParam('required_submodule', [], "A list of initialized submodules that this test requires.")
        params.addParam('required_objects', [], "A list of required objects that are in the executable.")
        params.addParam('check_input',    False, "Check for correct input file syntax")
        params.addParam('display_required', False, "The test requires an active display for rendering (i.e., ImageDiff tests).")
        params.addParam('boost',         ['ALL'], "A test that runs only if BOOST is detected ('ALL', 'TRUE', 'FALSE')")

        # Queueing specific
        params.addParam('copy_files',         [], "Additional list of files/directories to copy when performing queueing operations")
        params.addParam('link_files',         [], "Additional list of files/directories to symlink when performing queueing operations")
        params.addParam('queue_scheduler',  True, "A test that runs only if using queue options")

        return params
Example 5
    def __init__(self, name, params):
        MooseObject.__init__(self, name, params)
        self.specs = params
        self.outfile = None
        self.errfile = None
        self.joined_out = ''
        self.exit_code = 0
        self.process = None
        self.tags = params['tags']
        self.__caveats = set([])

        # Bool if test can run
        self._runnable = None

        # Initialize the status bucket class
        self.status = util.TestStatus()

        # Enumerate the buckets here so they are easier to work with in the tester class
        self.bucket_initialized = self.status.bucket_initialized
        self.bucket_success = self.status.bucket_success
        self.bucket_fail = self.status.bucket_fail
        self.bucket_diff = self.status.bucket_diff
        self.bucket_pending = self.status.bucket_pending
        self.bucket_finished = self.status.bucket_finished
        self.bucket_deleted = self.status.bucket_deleted
        self.bucket_skip = self.status.bucket_skip
        self.bucket_silent = self.status.bucket_silent
        self.bucket_queued = self.status.bucket_queued
        self.bucket_waiting_processing = self.status.bucket_waiting_processing

        # Set the status message
        if self.specs['check_input']:
            self.success_message = 'SYNTAX PASS'
        else:
            self.success_message = self.specs['success_message']

        # Set up common parameters
        self.should_execute = self.specs['should_execute']
        self.check_input = self.specs['check_input']

        if self.specs["allow_test_objects"]:
            self.specs["cli_args"].append("--allow-test-objects")
Example 6
    def __init__(self, name, params):
        MooseObject.__init__(self, name, params)
        self.specs = params
        self.outfile = None
        self.errfile = None
        self.joined_out = ''
        self.exit_code = 0
        self.process = None
        self.tags = params['tags']
        self.__caveats = set([])

        # Bool if test can run
        self._runnable = None

        # Initialize the status bucket class
        self.status = util.TestStatus()

        # Enumerate the buckets here so they are easier to work with in the tester class
        self.bucket_initialized  = self.status.bucket_initialized
        self.bucket_success      = self.status.bucket_success
        self.bucket_fail         = self.status.bucket_fail
        self.bucket_diff         = self.status.bucket_diff
        self.bucket_pending      = self.status.bucket_pending
        self.bucket_finished     = self.status.bucket_finished
        self.bucket_deleted      = self.status.bucket_deleted
        self.bucket_skip         = self.status.bucket_skip
        self.bucket_silent       = self.status.bucket_silent
        self.bucket_queued       = self.status.bucket_queued
        self.bucket_waiting_processing = self.status.bucket_waiting_processing

        # Set the status message
        if self.specs['check_input']:
            self.success_message = 'SYNTAX PASS'
        else:
            self.success_message = self.specs['success_message']

        # Set up common parameters
        self.should_execute = self.specs['should_execute']
        self.check_input = self.specs['check_input']

        if self.specs["allow_test_objects"]:
            self.specs["cli_args"].append("--allow-test-objects")
Example 7
    def __init__(self, name, params):
        MooseObject.__init__(self, name, params)
        self.specs = params
        self.outfile = None
        self.errfile = None
        self.joined_out = ''
        self.exit_code = 0
        self.process = None
        self.tags = params['tags']
        self.__caveats = set([])

        # Bool if test can run
        self._runnable = None

        # Set up common parameters
        self.should_execute = self.specs['should_execute']
        self.check_input = self.specs['check_input']

        if self.specs["allow_test_objects"]:
            self.specs["cli_args"].append("--allow-test-objects")
Example 8
    def __init__(self, name, params):
        MooseObject.__init__(self, name, params)
        self.specs = params
        self.outfile = None
        self.errfile = None
        self.joined_out = ''
        self.exit_code = 0
        self.process = None
        self.tags = params['tags']
        self.__caveats = set([])

        # Alternate text we want to print as part of our status instead of the
        # pre-formatted status text (SYNTAX PASS instead of OK for example)
        self.__tester_message = ''

        # Bool if test can run
        self._runnable = None

        # Set up common parameters
        self.should_execute = self.specs['should_execute']
        self.check_input = self.specs['check_input']

        if self.specs["allow_test_objects"]:
            self.specs["cli_args"].append("--allow-test-objects")

        ### Enumerate the tester statuses we want to use
        self.test_status = StatusSystem()
        self.no_status = self.test_status.no_status
        self.queued = self.test_status.queued
        self.skip = self.test_status.skip
        self.silent = self.test_status.silent
        self.success = self.test_status.success
        self.fail = self.test_status.fail
        self.diff = self.test_status.diff
        self.deleted = self.test_status.deleted

        self.__failed_statuses = [self.fail, self.diff, self.deleted]
        self.__skipped_statuses = [self.skip, self.silent]
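This constructor replaces the per-bucket attributes of the earlier examples with a single StatusSystem and then groups statuses into failed and skipped lists. The sketch below shows one way such an enumeration can be modelled and queried; the class name, fields, and colors are assumptions for illustration, not the actual TestHarness StatusSystem.

# Illustrative sketch of a status enumeration in the spirit of the StatusSystem referenced above
# (names, fields, and values are assumptions, not the real TestHarness implementation).
from collections import namedtuple

Status = namedtuple('Status', ['status', 'color'])

class MiniStatusSystem(object):
    no_status = Status('NA', 'GREY')
    queued    = Status('QUEUED', 'CYAN')
    skip      = Status('SKIP', 'GREY')
    silent    = Status('SILENT', 'GREY')
    success   = Status('OK', 'GREEN')
    fail      = Status('FAIL', 'RED')
    diff      = Status('DIFF', 'YELLOW')
    deleted   = Status('DELETED', 'RED')

    def __init__(self):
        self._current = self.no_status

    def getStatus(self):
        return self._current

    def setStatus(self, status):
        self._current = status


# Grouping statuses the way the Tester groups its failed and skipped lists above:
test_status = MiniStatusSystem()
failed_statuses = [MiniStatusSystem.fail, MiniStatusSystem.diff, MiniStatusSystem.deleted]
skipped_statuses = [MiniStatusSystem.skip, MiniStatusSystem.silent]

test_status.setStatus(MiniStatusSystem.diff)
print(test_status.getStatus() in failed_statuses)   # True
print(test_status.getStatus() in skipped_statuses)  # False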
Example 9
    def validParams():
        params = MooseObject.validParams()
        params.addRequiredParam('average_load',  64.0, "Average load to allow")
        params.addRequiredParam('max_processes', None, "Hard limit of maximum processes to use")

        return params
Example 10
    def __init__(self, harness, params):
        MooseObject.__init__(self, harness, params)

        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # The Scheduler class can be initialized with no "max_processes" argument and it'll default
        # to a soft limit. If, however, a max_processes is passed, we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed the number of slots.
        if params['max_processes'] is None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = params['max_processes'] # hard limit
            self.soft_limit = False

        self.average_load = params['average_load']

        self.min_report_time = params['min_reported_time']

        # Initialize run_pool based on available slots
        self.run_pool = ThreadPool(processes=self.available_slots)

        # Initialize status_pool to only use 1 process (to prevent status messages from getting clobbered)
        self.status_pool = ThreadPool(processes=1)

        # Slot lock when processing resource allocations and modifying slots_in_use
        self.slot_lock = threading.Lock()

        # Job lock when modifying a job's status
        self.activity_lock = threading.Lock()

        # Job count lock when modifying incoming/outgoing jobs
        self.job_count_lock = threading.Lock()

        # A combination of processors and threads (-j/-n) currently in use, as required by running jobs
        self.slots_in_use = 0

        # Count of jobs which need to complete
        self.job_count = 0

        # Set containing all submitted jobs
        self.__job_bank = set([])

        # Total running Job and Test failures encountered
        self.__failures = 0

        # Allow threads to set a global exception
        self.__error_state = False

        # Private set of jobs currently running
        self.__active_jobs = set([])

        # Jobs that are taking longer to finish than the allotted time are reported back early to inform
        # the user 'stuff' is still running. Jobs entering this set will not be reported again.
        self.jobs_reported = set([])

        # The last time the scheduler reported something
        self.last_reported_time = clock()

        # Sets of threading objects created by jobs entering and exiting the queues. When scheduler.waitFinish()
        # is called, and both thread pools are empty, the pools shut down, and the call to waitFinish() returns.
        self.__status_pool_lock = threading.Lock()
        self.__runner_pool_lock = threading.Lock()
        self.__status_pool_jobs = set([])
        self.__runner_pool_jobs = set([])

        # True when scheduler.waitFinish() is called. This alerts the scheduler that no more jobs are
        # to be scheduled. KeyboardInterrupts are then handled by the thread pools.
        self.__waiting = False
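This version of the scheduler adds min_report_time, jobs_reported, and last_reported_time, which together implement the "report a long-running job once" behaviour described in the comments. The fragment below is a hedged sketch of that bookkeeping in isolation; the class and method names are assumptions, and time.time() stands in for the clock() call used by the real code.

# Hypothetical sketch of the "report a long-running job only once" bookkeeping described above.
import time

class MiniReporter(object):
    def __init__(self, min_report_time=10.0):
        self.min_report_time = min_report_time   # seconds a job may run before being reported
        self.jobs_reported = set()               # jobs already reported; they are never reported again
        self.last_reported_time = time.time()    # the last time anything was reported

    def maybeReport(self, job_name, started_at):
        now = time.time()
        if job_name in self.jobs_reported:
            return False
        if now - started_at >= self.min_report_time:
            self.jobs_reported.add(job_name)
            self.last_reported_time = now
            print('%s is still running...' % job_name)
            return True
        return False


reporter = MiniReporter(min_report_time=0.0)
print(reporter.maybeReport('long_test', time.time()))  # True: reported once
print(reporter.maybeReport('long_test', time.time()))  # False: already in jobs_reported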
Example 11
    def validParams():
        params = MooseObject.validParams()

        # Common Options
        params.addRequiredParam(
            'type', "The type of test of Tester to create for this test.")
        params.addParam(
            'max_time', 300,
            "The maximum in seconds that the test will be allowed to run.")
        params.addParam('skip', "Provide a reason this test will be skipped.")
        params.addParam(
            'deleted',
            "Tests that only show up when using the '-e' option (Permanently skipped or not implemented)."
        )

        params.addParam(
            'heavy', False,
            "Set to True if this test should only be run when the '--heavy' option is used."
        )
        params.addParam('group', [],
                        "A list of groups for which this test belongs.")
        params.addParam(
            'prereq', [],
            "A list of prereq tests that need to run successfully before launching this test."
        )
        params.addParam(
            'skip_checks', False,
            "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)"
        )
        params.addParam('scale_refine', 0,
                        "The number of refinements to do when scaling")
        params.addParam('success_message', 'OK', "The message to display on success")

        params.addParam('cli_args', [],
                        "Additional arguments to be passed to the test.")
        params.addParam(
            'allow_test_objects', False,
            "Allow the use of test objects by adding --allow-test-objects to the command line."
        )

        params.addParam(
            'valgrind', 'NONE',
            "Set to (NONE, NORMAL, HEAVY) to determine which configurations where valgrind will run."
        )
        params.addParam('tags', [], "A list of strings")
        params.addParam(
            'max_buffer_size', None,
            "Bytes allowed in stdout/stderr before it is subjected to being trimmed. Set to -1 to ignore output size restrictions. "
            "If 'max_buffer_size' is not set, the default value of 'None' triggers a reasonable value (e.g. 100 kB)"
        )

        # Test Filters
        params.addParam(
            'platform', ['ALL'],
            "A list of platforms for which this test will run on. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')"
        )
        params.addParam(
            'compiler', ['ALL'],
            "A list of compilers for which this test is valid on. ('ALL', 'GCC', 'INTEL', 'CLANG')"
        )
        params.addParam(
            'petsc_version', ['ALL'],
            "A list of petsc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)"
        )
        params.addParam(
            'petsc_version_release', ['ALL'],
            "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'slepc_version', [],
            "A list of slepc versions for which this test will run on, supports normal comparison operators ('<', '>', etc...)"
        )
        params.addParam(
            'mesh_mode', ['ALL'],
            "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')"
        )
        params.addParam(
            'method', ['ALL'],
            "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')"
        )
        params.addParam(
            'library_mode', ['ALL'],
            "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')"
        )
        params.addParam(
            'dtk', ['ALL'],
            "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'unique_ids', ['ALL'],
            "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam('recover', True,
                        "A test that runs with '--recover' mode enabled")
        params.addParam(
            'vtk', ['ALL'],
            "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'tecplot', ['ALL'],
            "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'dof_id_bytes', ['ALL'],
            "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'"
        )
        params.addParam(
            'petsc_debug', ['ALL'],
            "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs."
        )
        params.addParam(
            'curl', ['ALL'],
            "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'threading', ['ALL'],
            "A list of threading models ths tests runs with ('ALL', 'TBB', 'OPENMP', 'PTHREADS', 'NONE')"
        )
        params.addParam(
            'superlu', ['ALL'],
            "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'chaco', ['ALL'],
            "A test that runs only if Chaco (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'parmetis', ['ALL'],
            "A test that runs only if Parmetis (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'party', ['ALL'],
            "A test that runs only if Party (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'ptscotch', ['ALL'],
            "A test that runs only if PTScotch (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'slepc', ['ALL'],
            "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'unique_id', ['ALL'],
            "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'cxx11', ['ALL'],
            "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'asio', ['ALL'],
            "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            "fparser_jit", ['ALL'],
            "A test that runs only if FParser JIT is available ('ALL', 'TRUE', 'FALSE')"
        )
        params.addParam(
            'depend_files', [],
            "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory"
        )
        params.addParam(
            'env_vars', [],
            "A test that only runs if all the environment variables listed exist"
        )
        params.addParam(
            'should_execute', True,
            'Whether or not the executable needs to be run. Use this to chain together multiple tests based off of one executable invocation'
        )
        params.addParam(
            'required_submodule', [],
            "A list of initialized submodules for which this test requires.")
        params.addParam(
            'required_objects', [],
            "A list of required objects that are in the executable.")
        params.addParam(
            'required_applications', [],
            "A list of required registered applications that are in the executable."
        )
        params.addParam('check_input', False,
                        "Check for correct input file syntax")
        params.addParam(
            'display_required', False,
            "The test requires and active display for rendering (i.e., ImageDiff tests)."
        )
        params.addParam(
            'timing', True,
            "If True, the test will be allowed to run with the timing flag (i.e. Manually turning on performance logging)."
        )
        params.addParam(
            'boost', ['ALL'],
            "A test that runs only if BOOST is detected ('ALL', 'TRUE', 'FALSE')"
        )

        # SQA
        params.addParam(
            "requirement", None,
            "The SQA requirement that this test satisfies (e.g., 'The Marker system shall provide means to mark elements for refinement within a box region.')"
        )
        params.addParam(
            "design", [],
            "The list of markdown files that contain the design(s) associated with this test (e.g., '/Markers/index.md /BoxMarker.md')."
        )
        params.addParam(
            "issues", [],
            "The list of github issues associated with this test (e.g., '#1234 #4321')"
        )
        params.addParam("validation", False,
                        "Set to True to mark test as a validation problem.")
        params.addParam("verification", False,
                        "Set to True to mark test as a verification problem.")
        return params
Example 12
    def __init__(self, harness, params):
        MooseObject.__init__(self, harness, params)

        ## The test harness to run callbacks on
        self.harness = harness

        # Retrieve and store the TestHarness options for use in this object
        self.options = harness.getOptions()

        # The Scheduler class can be initialized with no "max_processes" argument and it'll default
        # to a soft limit. If, however, a max_processes is passed, we'll treat it as a hard limit.
        # The difference is whether or not we allow single jobs to exceed the number of slots.
        if params['max_processes'] is None:
            self.available_slots = 1
            self.soft_limit = True
        else:
            self.available_slots = params['max_processes']  # hard limit
            self.soft_limit = False

        self.average_load = params['average_load']

        self.min_report_time = params['min_reported_time']

        # Initialize run_pool based on available slots
        self.run_pool = ThreadPool(processes=self.available_slots)

        # Initialize status_pool to only use 1 process (to prevent status messages from getting clobbered)
        self.status_pool = ThreadPool(processes=1)

        # Slot lock when processing resource allocations and modifying slots_in_use
        self.slot_lock = threading.Lock()

        # Job lock when modifying a job's status
        self.activity_lock = threading.Lock()

        # A combination of processors and threads (-j/-n) currently in use, as required by running jobs
        self.slots_in_use = 0

        # Set containing all scheduled jobs
        self.__scheduled_jobs = set([])

        # Set containing jobs entering the run_pool
        self.__job_bank = set([])

        # Total running Job and Test failures encountered
        self.__failures = 0

        # Allow threads to set a global exception
        self.__error_state = False

        # Private set of jobs currently running
        self.__active_jobs = set([])

        # Jobs that are taking longer to finish than the allotted time are reported back early to inform
        # the user 'stuff' is still running. Jobs entering this set will not be reported again.
        self.jobs_reported = set([])

        # The last time the scheduler reported something
        self.last_reported_time = clock()

        # Sets of threading objects created by jobs entering and exiting the queues. When scheduler.waitFinish()
        # is called, and both thread pools are empty, the pools shut down, and the call to waitFinish() returns.
        self.__status_pool_lock = threading.Lock()
        self.__runner_pool_lock = threading.Lock()
        self.__status_pool_jobs = set([])
        self.__runner_pool_jobs = set([])

        # True when scheduler.waitFinish() is called. This alerts the scheduler that no more jobs are
        # to be scheduled. KeyboardInterrupts are then handled by the thread pools.
        self.__waiting = False
Example 13
    def validParams():
        params = MooseObject.validParams()

        # Common Options
        params.addRequiredParam('type', "The type of Tester to create for this test.")
        params.addParam('max_time',   300, "The maximum time in seconds that the test will be allowed to run.")
        params.addParam('skip',     "Provide a reason this test will be skipped.")
        params.addParam('deleted',         "Tests that only show up when using the '-e' option (Permanently skipped or not implemented).")

        params.addParam('heavy',    False, "Set to True if this test should only be run when the '--heavy' option is used.")
        params.addParam('group',       [], "A list of groups to which this test belongs.")
        params.addParam('prereq',      [], "A list of prereq tests that need to run successfully before launching this test.")
        params.addParam('skip_checks', False, "Tells the TestHarness to skip additional checks (This parameter is set automatically by the TestHarness during recovery tests)")
        params.addParam('scale_refine',    0, "The number of refinements to do when scaling")
        params.addParam('success_message', 'OK', "The message to display on success")

        params.addParam('cli_args',       [], "Additional arguments to be passed to the test.")
        params.addParam('allow_test_objects', False, "Allow the use of test objects by adding --allow-test-objects to the command line.")

        params.addParam('valgrind', 'NONE', "Set to (NONE, NORMAL, HEAVY) to determine the configurations under which valgrind will run.")
        params.addParam('tags',      [], "A list of strings")
        params.addParam('max_buffer_size', None, "Bytes allowed in stdout/stderr before it is subjected to being trimmed. Set to -1 to ignore output size restrictions. "
                                                 "If 'max_buffer_size' is not set, the default value of 'None' triggers a reasonable value (e.g. 100 kB)")

        # Test Filters
        params.addParam('platform',      ['ALL'], "A list of platforms on which this test will run. ('ALL', 'DARWIN', 'LINUX', 'SL', 'LION', 'ML')")
        params.addParam('compiler',      ['ALL'], "A list of compilers for which this test is valid. ('ALL', 'GCC', 'INTEL', 'CLANG')")
        params.addParam('petsc_version', ['ALL'], "A list of PETSc versions with which this test will run; supports normal comparison operators ('<', '>', etc...)")
        params.addParam('petsc_version_release', ['ALL'], "A test that runs against PETSc master if FALSE ('ALL', 'TRUE', 'FALSE')")
        params.addParam('slepc_version', [], "A list of SLEPc versions with which this test will run; supports normal comparison operators ('<', '>', etc...)")
        params.addParam('mesh_mode',     ['ALL'], "A list of mesh modes for which this test will run ('DISTRIBUTED', 'REPLICATED')")
        params.addParam('method',        ['ALL'], "A test that runs under certain executable configurations ('ALL', 'OPT', 'DBG', 'DEVEL', 'OPROF', 'PRO')")
        params.addParam('library_mode',  ['ALL'], "A test that only runs when libraries are built under certain configurations ('ALL', 'STATIC', 'DYNAMIC')")
        params.addParam('dtk',           ['ALL'], "A test that runs only if DTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_ids',    ['ALL'], "A test that runs only if UNIQUE_IDs are enabled ('ALL', 'TRUE', 'FALSE')")
        params.addParam('recover',       True,    "A test that runs with '--recover' mode enabled")
        params.addParam('vtk',           ['ALL'], "A test that runs only if VTK is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('tecplot',       ['ALL'], "A test that runs only if Tecplot is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('dof_id_bytes',  ['ALL'], "A test that runs only if libmesh is configured --with-dof-id-bytes = a specific number, e.g. '4', '8'")
        params.addParam('petsc_debug',   ['ALL'], "{False,True} -> test only runs when PETSc is configured with --with-debugging={0,1}, otherwise test always runs.")
        params.addParam('curl',          ['ALL'], "A test that runs only if CURL is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('threading',     ['ALL'], "A list of threading models this test runs with ('ALL', 'TBB', 'OPENMP', 'PTHREADS', 'NONE')")
        params.addParam('superlu',       ['ALL'], "A test that runs only if SuperLU is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('chaco',         ['ALL'], "A test that runs only if Chaco (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('parmetis',      ['ALL'], "A test that runs only if Parmetis (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('party',         ['ALL'], "A test that runs only if Party (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('ptscotch',      ['ALL'], "A test that runs only if PTScotch (partitioner) is available via PETSc ('ALL', 'TRUE', 'FALSE')")
        params.addParam('slepc',         ['ALL'], "A test that runs only if SLEPc is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('unique_id',     ['ALL'], "A test that runs only if libmesh is configured with --enable-unique-id ('ALL', 'TRUE', 'FALSE')")
        params.addParam('cxx11',         ['ALL'], "A test that runs only if CXX11 is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('asio',          ['ALL'], "A test that runs only if ASIO is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam("fparser_jit",   ['ALL'], "A test that runs only if FParser JIT is available ('ALL', 'TRUE', 'FALSE')")
        params.addParam('depend_files',  [], "A test that only runs if all depend files exist (files listed are expected to be relative to the base directory, not the test directory)")
        params.addParam('env_vars',      [], "A test that only runs if all the environment variables listed exist")
        params.addParam('should_execute', True, 'Whether or not the executable needs to be run. Use this to chain together multiple tests based off of one executable invocation')
        params.addParam('required_submodule', [], "A list of initialized submodules that this test requires.")
        params.addParam('required_objects', [], "A list of required objects that are in the executable.")
        params.addParam('required_applications', [], "A list of required registered applications that are in the executable.")
        params.addParam('check_input',    False, "Check for correct input file syntax")
        params.addParam('display_required', False, "The test requires an active display for rendering (i.e., ImageDiff tests).")
        params.addParam('timing',         True, "If True, the test will be allowed to run with the timing flag (i.e. Manually turning on performance logging).")
        params.addParam('boost',         ['ALL'], "A test that runs only if BOOST is detected ('ALL', 'TRUE', 'FALSE')")
        params.addParam('sympy', False, "If True, sympy is required.")

        # SQA
        params.addParam("requirement", None, "The SQA requirement that this test satisfies (e.g., 'The Marker system shall provide means to mark elements for refinement within a box region.')")
        params.addParam("design", [], "The list of markdown files that contain the design(s) associated with this test (e.g., '/Markers/index.md /BoxMarker.md').")
        params.addParam("issues", [], "The list of github issues associated with this test (e.g., '#1234 #4321')")
        params.addParam("validation", False, "Set to True to mark test as a validation problem.")
        params.addParam("verification", False, "Set to True to mark test as a verification problem.")
        return params
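The max_buffer_size parameter above says that stdout/stderr is trimmed once it exceeds a byte limit, that -1 disables the restriction, and that None falls back to a reasonable default (e.g. 100 kB). A minimal sketch of such a trimming helper, with a hypothetical name and truncation style, could look like this:

# Hedged sketch of output trimming driven by 'max_buffer_size' as documented above
# (the helper name and the exact truncation marker are assumptions).
def trimOutput(output, max_buffer_size=None):
    # None falls back to a reasonable default (e.g. 100 kB); -1 disables trimming entirely.
    if max_buffer_size is None:
        max_buffer_size = 100 * 1024
    if max_buffer_size == -1 or len(output) <= max_buffer_size:
        return output
    half = max_buffer_size // 2
    return (output[:half]
            + '\n\n... output trimmed ...\n\n'
            + output[-half:])


print(len(trimOutput('x' * 300000)))  # roughly 100 kB plus the trim marker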