Example 1
    def processResults(self, moose_dir, options, output):
        reason = ''
        specs = self.specs

        if self.hasRedirectedOutput(options):
            redirected_output = util.getOutputFromFiles(self, options)
            output += redirected_output

        # Expected errors and assertions might do a lot of things including crash so we
        # will handle them separately
        if specs.isValid('expect_err'):
            if not util.checkOutputForPattern(output, specs['expect_err']):
                reason = 'EXPECTED ERROR MISSING'
        elif specs.isValid('expect_assert'):
            if options.method in ['dbg', 'devel']:  # Only check asserts in debug and devel modes
                if not util.checkOutputForPattern(output, specs['expect_assert']):
                    reason = 'EXPECTED ASSERT MISSING'

        # If we've set a reason right here, we should report the pattern that we were unable to match.
        if reason != '':
            output += "#"*80 + "\n\nUnable to match the following pattern against the program's output:\n\n" + specs['expect_err'] + "\n"

        if reason == '':
            RunApp.testFileOutput(self, moose_dir, options, output)

        if reason != '':
            self.setStatus(reason, self.bucket_fail)
        else:
            self.setStatus(self.success_message, self.bucket_success)

        return output
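
Note: the processResults variants above and below delegate the pattern check to util.checkOutputForPattern. A minimal sketch of what such a helper could look like, assuming it simply wraps re.search over the captured output (the real util module may behave differently):

    # Hypothetical sketch of the pattern-check helper, not the actual util module.
    import re

    def checkOutputForPattern(output, re_pattern):
        # True if the regular expression matches anywhere in the multi-line output.
        return re.search(re_pattern, output, re.MULTILINE | re.DOTALL) is not None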
Example 2
    def processResults(self, moose_dir, options, output):
        reason = ''
        specs = self.specs

        if self.hasRedirectedOutput(options):
            redirected_output = util.getOutputFromFiles(self, options)
            output += redirected_output

        # Expected errors and assertions might do a lot of things including crash so we
        # will handle them separately
        if specs.isValid('expect_err'):
            if not util.checkOutputForPattern(output, specs['expect_err']):
                reason = 'NO EXPECTED ERR'
        elif specs.isValid('expect_assert'):
            if options.method == 'dbg':  # Only check asserts in debug mode
                if not util.checkOutputForPattern(output,
                                                  specs['expect_assert']):
                    reason = 'NO EXPECTED ASSERT'

        if reason == '':
            RunApp.testFileOutput(self, moose_dir, options, output)

        if reason != '':
            self.setStatus(reason, self.bucket_fail)
        else:
            self.setStatus(self.success_message, self.bucket_success)

        return output
Example 3
    def __init__(self, name, params):
        RunApp.__init__(self, name, params)

        self.moose_dir = os.environ.get(
            'MOOSE_DIR',
            os.path.abspath(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '..')))

        if os.environ.has_key("LIBMESH_DIR"):
            self.libmesh_dir = os.environ['LIBMESH_DIR']
        else:
            self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh',
                                            'installed')

        if self.specs['turn_off_exodus_output']:
            self.specs['cli_args'][:0] = ['Outputs/exodus=false']

        if map(int,
               util.getPetscVersion(self.libmesh_dir).split(".")) < [3, 9]:
            self.old_petsc = True
            self.specs['cli_args'].extend(
                ['-snes_type test', '-snes_mf_operator 0'])
        else:
            self.old_petsc = False
            self.specs['cli_args'].extend(
                ['-snes_test_jacobian', '-snes_force_iteration'])
            if not self.specs['run_sim']:
                self.specs['cli_args'].extend([
                    '-snes_type', 'ksponly', '-ksp_type', 'preonly',
                    '-pc_type', 'none', '-snes_convergence_test', 'skip'
                ])
Example 4
    def __init__(self, name, params):
        RunApp.__init__(self, name, params)

        self.moose_dir = os.environ.get(
            'MOOSE_DIR',
            os.path.abspath(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '..')))

        if os.environ.has_key("LIBMESH_DIR"):
            self.libmesh_dir = os.environ['LIBMESH_DIR']
        else:
            self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh',
                                            'installed')

        if util.getPetscVersion(self.libmesh_dir) < '3.9':
            self.specs['cli_args'].append('-snes_type test')
        else:
            self.specs['cli_args'].extend(
                ['-snes_test_jacobian', '-snes_force_iteration'])
            if not self.specs['run_sim']:
                self.specs['cli_args'].extend([
                    '-snes_type', 'ksponly', '-ksp_type', 'preonly',
                    '-pc_type', 'none', '-snes_convergence_test', 'skip'
                ])
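
Note: Example 3 compares the PETSc version numerically with map(int, version.split(".")) < [3, 9], which relies on Python 2 list comparison, while this variant compares the raw version string, which is lexicographic (for instance '3.10' sorts before '3.9'). A small sketch of a comparison that avoids both pitfalls, assuming getPetscVersion returns a dotted string such as '3.9.4':

    # Hypothetical sketch: compare dotted version strings as integer tuples.
    def petsc_version_at_least(version, minimum=(3, 9)):
        parts = tuple(int(p) for p in version.split('.'))
        return parts >= minimum

    # petsc_version_at_least('3.10.2') -> True, whereas the string comparison
    # '3.10.2' < '3.9' is True and would wrongly select the old-PETSc branch.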
Example 5
    def processResults(self, moose_dir, options, output):
        reason = ''
        specs = self.specs

        if self.hasRedirectedOutput(options):
            redirected_output = util.getOutputFromFiles(self, options)
            output += redirected_output

        # Expected errors and assertions might do a lot of things including crash so we
        # will handle them separately
        if specs.isValid('expect_err'):
            if not util.checkOutputForPattern(output, specs['expect_err']):
                reason = 'NO EXPECTED ERR'
        elif specs.isValid('expect_assert'):
            if options.method == 'dbg':  # Only check asserts in debug mode
                if not util.checkOutputForPattern(output, specs['expect_assert']):
                    reason = 'NO EXPECTED ASSERT'

        if reason == '':
            RunApp.testFileOutput(self, moose_dir, options, output)

        if reason != '':
            self.setStatus(reason, self.bucket_fail)
        else:
            self.setStatus(self.success_message, self.bucket_success)

        return output
Example 6
    def processResults(self, moose_dir, options, output):
        reason = ''
        specs = self.specs

        if self.hasRedirectedOutput(options):
            redirected_output = util.getOutputFromFiles(self, options)
            output += redirected_output

        # Expected errors and assertions might do a lot of things including crash so we
        # will handle them separately
        if specs.isValid('expect_err'):
            if not util.checkOutputForPattern(output, specs['expect_err']):
                reason = 'EXPECTED ERROR MISSING'
        elif specs.isValid('expect_assert'):
            if options.method in [
                    'dbg', 'devel'
            ]:  # Only check asserts in debug and devel modes
                if not util.checkOutputForPattern(output,
                                                  specs['expect_assert']):
                    reason = 'EXPECTED ASSERT MISSING'

        # If we've set a reason right here, we should report the pattern that we were unable to match.
        if reason != '':
            output += "#" * 80 + "\n\nUnable to match the following pattern against the program's output:\n\n" + specs[
                'expect_err'] + "\n"

        if reason == '':
            RunApp.testFileOutput(self, moose_dir, options, output)

        if reason != '':
            self.setStatus(self.fail, reason)

        return output
Example 7
 def __init__(self, name, params):
     RunApp.__init__(self, name, params)
     if (params.isValid("expect_err") == False
             and params.isValid("expect_assert") == False):
         raise RuntimeError(
             'Either "expect_err" or "expect_assert" must be supplied in RunException'
         )
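
A minimal usage illustration of the guard above; the construction pattern and names here are assumptions for illustration only, not the harness's actual driver code:

    # Hypothetical sketch: neither 'expect_err' nor 'expect_assert' is supplied,
    # so constructing the tester raises the RuntimeError from the guard above.
    params = RunException.validParams()
    try:
        tester = RunException('missing_pattern_test', params)
    except RuntimeError as err:
        print(err)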
Example 8
    def processResults(self, moose_dir, options, output):
        RunApp.testFileOutput(self, moose_dir, options, output)

        # Skip
        specs = self.specs

        if self.getStatus() == self.bucket_fail or specs['skip_checks']:
            return output

        # Don't Run VTKDiff on Scaled Tests
        if options.scaling and specs['scale_refine']:
            return output

        # Loop over every file
        for file in specs['vtkdiff']:

            # Error if gold file does not exist
            if not os.path.exists(
                    os.path.join(specs['test_dir'], specs['gold_dir'], file)):
                output += "File Not Found: " + os.path.join(
                    specs['test_dir'], specs['gold_dir'], file)
                self.setStatus('MISSING GOLD FILE', self.bucket_fail)
                break

            # Perform diff
            else:
                for file in self.specs['vtkdiff']:
                    gold = os.path.join(specs['test_dir'], specs['gold_dir'],
                                        file)
                    test = os.path.join(specs['test_dir'], file)

                    # We always ignore the header_type attribute, since it was
                    # introduced in VTK 7 and doesn't seem to be important as
                    # far as Paraview is concerned.
                    specs['ignored_attributes'].append('header_type')

                    differ = XMLDiffer(
                        gold,
                        test,
                        abs_zero=specs['abs_zero'],
                        rel_tol=specs['rel_err'],
                        ignored_attributes=specs['ignored_attributes'])

                    # Print the results of the VTKDiff whether it passed or failed.
                    output += differ.message() + '\n'

                    if differ.fail():
                        self.setStatus('VTKDIFF', self.bucket_skip)
                        break

        # If status is still pending, then it is a passing test
        if self.getStatus() == self.bucket_pending:
            self.setStatus(self.success_message, self.bucket_success)

        return output
Example 9
    def processResults(self, moose_dir, options, output):
        RunApp.testFileOutput(self, moose_dir, options, output)

        # Skip
        specs = self.specs

        if self.getStatus() == self.bucket_fail or specs['skip_checks']:
            return output

        # Don't Run VTKDiff on Scaled Tests
        if options.scaling and specs['scale_refine']:
            return output

        # Loop over every file
        for file in specs['vtkdiff']:

            # Error if gold file does not exist
            if not os.path.exists(os.path.join(specs['test_dir'], specs['gold_dir'], file)):
                output += "File Not Found: " + os.path.join(specs['test_dir'], specs['gold_dir'], file)
                self.setStatus('MISSING GOLD FILE', self.bucket_fail)
                break

            # Perform diff
            else:
                for file in self.specs['vtkdiff']:
                    gold = os.path.join(specs['test_dir'], specs['gold_dir'], file)
                    test = os.path.join(specs['test_dir'], file)

                    # We always ignore the header_type attribute, since it was
                    # introduced in VTK 7 and doesn't seem to be important as
                    # far as Paraview is concerned.
                    specs['ignored_attributes'].append('header_type')

                    differ = XMLDiffer(gold, test, abs_zero=specs['abs_zero'], rel_tol=specs['rel_err'], ignored_attributes=specs['ignored_attributes'])

                    # Print the results of the VTKDiff whether it passed or failed.
                    output += differ.message() + '\n'

                    if differ.fail():
                        self.addCaveats('VTKDIFF')
                        self.setStatus(self.bucket_skip.status, self.bucket_skip)
                        break

        # If status is still pending, then it is a passing test
        if self.getStatus() == self.bucket_pending:
            self.setStatus(self.success_message, self.bucket_success)

        return output
Example 10
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    specs = self.specs
    if reason != '' or specs['skip_checks']:
      return (reason, output)

    # Don't Run Exodiff on Scaled Tests
    if options.scaling and specs['scale_refine']:
      return (reason, output)

    for file in specs['exodiff']:
      custom_cmp = ''
      old_floor = ''
      if specs.isValid('custom_cmp'):
         custom_cmp = ' -f ' + os.path.join(specs['test_dir'], specs['custom_cmp'])
      if specs['use_old_floor']:
         old_floor = ' -use_old_floor'

      if not os.path.exists(os.path.join(specs['test_dir'], specs['gold_dir'], file)):
        output += "File Not Found: " + os.path.join(specs['test_dir'], specs['gold_dir'], file)
        reason = 'MISSING GOLD FILE'
        break
      else:
        command = os.path.join(moose_dir, 'framework', 'contrib', 'exodiff', 'exodiff') + ' -m' + custom_cmp + ' -F' + ' ' + str(specs['abs_zero']) + old_floor + ' -t ' + str(specs['rel_err']) \
            + ' ' + ' '.join(specs['exodiff_opts']) + ' ' + os.path.join(specs['test_dir'], specs['gold_dir'], file) + ' ' + os.path.join(specs['test_dir'], file)
        exo_output = runCommand(command)

        output += 'Running exodiff: ' + command + '\n' + exo_output + ' ' + ' '.join(specs['exodiff_opts'])

        if ('different' in exo_output or 'ERROR' in exo_output) and not "Files are the same" in exo_output:
          reason = 'EXODIFF'
          break

    return (reason, output)
Example 11
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    if reason != '' or self.specs['skip_checks']:
      return (reason, output)

    # Don't Run txtdiff on Scaled Tests
    if options.scaling and self.specs['scale_refine']:
      return (reason, output)

    # Make sure that all of the txtdiff files are actually available
    for file in self.specs['txtdiff']:
      if not os.path.exists(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)):
        output += "File Not Found: " + os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)
        reason = 'MISSING GOLD FILE'
        break

    # Run the txtdiff
    if reason == '':
      output += 'Running txtdiff'
      with open(os.path.join(self.specs['test_dir'], file), 'r') as f:
        first_line = f.readline()
      with open(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file), 'r') as f:
        first_line_gold = f.readline()
      if first_line != first_line_gold:
        reason = 'TXTDIFF'

    return (reason, output)
Example 12
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('txtdiff',   [], "A list of files to txtdiff.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")

    return params
Example 13
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    if reason != '' or self.specs['skip_checks']:
      return (reason, output)

    # Don't Run Exodiff on Scaled Tests
    if options.scaling and self.specs['scale_refine']:
      return (reason, output)

    # Make sure that all of the Exodiff files are actually available
    for file in self.specs['exodiff']:
      if not os.path.exists(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)):
        output += "File Not Found: " + os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)
        reason = 'MISSING GOLD FILE'
        break

    if reason == '':
      # Retrieve the commands
      commands = self.processResultsCommand(moose_dir, options)

      for command in commands:
        exo_output = runCommand(command)

        output += 'Running exodiff: ' + command + '\n' + exo_output + ' ' + ' '.join(self.specs['exodiff_opts'])

        if ('different' in exo_output or 'ERROR' in exo_output) and not "Files are the same" in exo_output:
          reason = 'EXODIFF'
          break

    return (reason, output)
Example 14
    def processResults(self, moose_dir, retcode, options, output):
        (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

        specs = self.specs
        if reason != "" or specs["skip_checks"]:
            return (reason, output)

        if reason == "":
            # if still no errors, check other files (just for existence)
            for file in self.specs["check_files"]:
                if not os.path.isfile(os.path.join(self.specs["test_dir"], file)):
                    reason = "MISSING FILES"
                    break
            for file in self.specs["check_not_exists"]:
                if os.path.isfile(os.path.join(self.specs["test_dir"], file)):
                    reason = "UNEXPECTED FILES"
                    break

            # if still no errors, check that all the files contain the file_expect_out expression
            if reason == "":
                if self.specs.isValid("file_expect_out"):
                    for file in self.specs["check_files"]:
                        fid = open(os.path.join(self.specs["test_dir"], file), "r")
                        contents = fid.read()
                        fid.close()
                        if not self.checkOutputForPattern(contents, self.specs["file_expect_out"]):
                            reason = "NO EXPECTED OUT IN FILE"
                            break

        return (reason, output)
Example 15
    def validParams():
        params = RunApp.validParams()
        params.addRequiredParam('exodiff', [], "A list of files to exodiff.")
        params.addParam(
            'exodiff_opts', [],
            "Additional arguments to be passed to invocations of exodiff.")
        params.addParam(
            'gold_dir', 'gold',
            "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)"
        )
        params.addParam('abs_zero', 1e-10,
                        "Absolute zero cutoff used in exodiff comparisons.")
        params.addParam('rel_err', 5.5e-6,
                        "Relative error value used in exodiff comparisons.")
        params.addParam('custom_cmp', "Custom comparison file")
        params.addParam('use_old_floor', False, "Use Exodiff old floor option")
        params.addParam(
            'delete_output_before_running', True,
            "Delete pre-existing output files before running test. Only set to False if you know what you're doing!"
        )
        params.addParam(
            'map', True,
            "Use geometrical mapping to match up elements.  This is usually a good idea because it makes files comparable between runs with Serial and Parallel Mesh."
        )

        return params
Example 16
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    specs = self.specs
    if reason != '' or specs['skip_checks']:
      return (reason, output)

    if reason == '':
     # if still no errors, check other files (just for existence)
     for file in self.specs['check_files']:
       if not os.path.isfile(os.path.join(self.specs['test_dir'], file)):
         reason = 'MISSING FILES'
         break
     for file in self.specs['check_not_exists']:
       if os.path.isfile(os.path.join(self.specs['test_dir'], file)):
         reason = 'UNEXPECTED FILES'
         break

     # if still no errors, check that all the files contain the file_expect_out expression
     if reason == '':
       if self.specs.isValid('file_expect_out'):
         for file in self.specs['check_files']:
           fid = open(os.path.join(self.specs['test_dir'], file), 'r')
           contents = fid.read()
           fid.close()
           if not self.checkOutputForPattern(contents, self.specs['file_expect_out']):
             reason = 'NO EXPECTED OUT IN FILE'
             break

    return (reason, output)
Example 17
 def getValidParams():
   params = RunApp.getValidParams()
   params.addParam('check_files', [], "A list of files that MUST exist.")
   params.addParam('check_not_exists', [], "A list of files that must NOT exist.")
   params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")
   params.addParam('file_expect_out', "A regular expression that must occur in all of the check files in order for the test to be considered passing.")
   return params
Example 18
    def validParams():
        params = RunApp.validParams()
        params.addParam('ratio_tol', 1e-8,
                        "Relative tolerance to compare the ration against.")
        params.addParam(
            'difference_tol', 1e-8,
            "Relative tolerance to compare the difference against.")
        params.addParam(
            'state', 'user',
            "The state for which we want to compare against the "
            "finite-differenced Jacobian ('user', 'const_positive', or "
            "'const_negative'.")
        params.addParam(
            'run_sim', False,
            "Whether to actually run the simulation, testing the Jacobian "
            "at every non-linear iteration of every time step. This is only "
            "relevant for petsc versions >= 3.9.")
        params.addParam('turn_off_exodus_output', True,
                        "Whether to set exodus=false in Outputs")

        # override default values
        params.valid['valgrind'] = 'NONE'
        params.valid['petsc_version'] = ['>=3.9.4']
        params.valid['method'] = ['OPT']

        return params
Example 19
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    if reason != '' or self.specs['skip_checks']:
      return (reason, output)

    # Don't Run txtdiff on Scaled Tests
    if options.scaling and self.specs['scale_refine']:
      return (reason, output)

    # Make sure that all of the txtdiff files are actually available
    for file in self.specs['txtdiff']:
      if not os.path.exists(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)):
        output += "File Not Found: " + os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)
        reason = 'MISSING GOLD FILE'
        break

    # Run the txtdiff
    if reason == '':
      output += 'Running txtdiff'
      with open(os.path.join(self.specs['test_dir'], file), 'r') as f:
        first_line = f.readline()
      with open(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file), 'r') as f:
        first_line_gold = f.readline()
      if first_line != first_line_gold:
        reason = 'TXTDIFF'

    return (reason, output)
Example 20
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('txtdiff',   [], "A list of files to txtdiff.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")

    return params
Example 21
 def processResults(self, moose_dir, retcode, options, output):
   if self.httpServer.getNumberOfPosts() != int(self.nPosts):
      return ("ICEUpdater FAILED: DID NOT GET CORRECT NUMBER OF POSTS",
              "Number of Posts was " + str(self.httpServer.getNumberOfPosts()) +
              ", but should have been " + str(self.nPosts))
   else:
      return RunApp.processResults(self, moose_dir, retcode, options, output)
Example 22
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    specs = self.specs
    if reason != '' or specs['skip_checks']:
      return (reason, output)

    if reason == '':
     # if still no errors, check other files (just for existence)
     for file in self.specs['check_files']:
       if not os.path.isfile(os.path.join(self.specs['test_dir'], file)):
         reason = 'MISSING FILES'
         break
     for file in self.specs['check_not_exists']:
       if os.path.isfile(os.path.join(self.specs['test_dir'], file)):
         reason = 'UNEXPECTED FILES'
         break

     # if still no errors, check that all the files contain the file_expect_out expression
     if reason == '':
       if self.specs.isValid('file_expect_out'):
         for file in self.specs['check_files']:
           fid = open(os.path.join(self.specs['test_dir'], file), 'r')
           contents = fid.read()
           fid.close()
           if not self.checkOutputForPattern(contents, self.specs['file_expect_out']):
             reason = 'NO EXPECTED OUT IN FILE'
             break

    return (reason, output)
Example 23
 def validParams():
   params = RunApp.validParams()
   params.addParam('check_files', [], "A list of files that MUST exist.")
   params.addParam('check_not_exists', [], "A list of files that must NOT exist.")
   params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")
   params.addParam('file_expect_out', "A regular expression that must occur in all of the check files in order for the test to be considered passing.")
   return params
Example 24
  def validParams():
    params = RunApp.validParams()

    params.addParam('expect_err', "A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
    params.addParam('expect_assert', "DEBUG MODE ONLY: A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
    params.addParam('should_crash', True, "Indicates that the test is expected to crash or otherwise terminate early")

    return params
Example 25
    def validParams():
        params = RunApp.validParams()
        params.addRequiredParam('csvdiff',   [], "A list of files to run CSVDiff on.")
        params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
        params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
        params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")

        return params
Example 26
 def validParams():
     params = RunApp.validParams()
     params.addParam('ratio_tol', 1e-8, "Relative tolerance to compare the ratio against.")
     params.addParam('difference_tol', 1e-8, "Relative tolerance to compare the difference against.")
     params.addParam('state', 'user', "The state for which we want to compare against the "
                                      "finite-differenced Jacobian ('user', 'const_positive', or "
                                      "'const_negative'.")
     return params
Example 27
 def validParams():
     params = RunApp.validParams()
     params.addParam('ratio_tol', 1e-8, "Relative tolerance to compare the ratio against.")
     params.addParam('difference_tol', 1e-8, "Relative tolerance to compare the difference against.")
     params.addParam('state', 'user', "The state for which we want to compare against the "
                                      "finite-differenced Jacobian ('user', 'const_positive', or "
                                      "'const_negative'.")
     return params
Example 28
  def getValidParams():
    params = RunApp.getValidParams()
    params.addRequiredParam('vtkdiff',   [], "A list of files to vtkdiff.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
    params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")

    return params
Example 29
    def validParams():
        params = RunApp.validParams()
        params.addRequiredParam('vtkdiff',   [], "A list of files to vtkdiff.")
        params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
        params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
        params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
        params.addParam('ignored_attributes',  [], "Ignore e.g. type and/or version in sample XML block <VTKFile type=\"Foo\" version=\"0.1\">")

        return params
Example 30
 def processResults(self, moose_dir, retcode, options, output):
     if self.httpServer.getNumberOfPosts() != int(self.nPosts):
         return ("ICEUpdater FAILED: DID NOT GET CORRECT NUMBER OF POSTS",
                 "Number of Posts was " +
                 str(self.httpServer.getNumberOfPosts()) +
                 ", but should have been " + str(self.nPosts))
     else:
         return RunApp.processResults(self, moose_dir, retcode, options,
                                      output)
Example 31
    def validParams():
        params = RunApp.validParams()
        params.addParam('ratio_tol', 1e-8,
                        "Relative tolerance to compare the ration against.")
        params.addParam(
            'difference_tol', 1e-8,
            "Relative tolerance to compare the difference against.")

        return params
Example 32
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('csvdiff',   [], "A list of files to run CSVDiff on.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
    params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")

    return params
Example 33
    def validParams():
        params = RunApp.validParams()
        params.addRequiredParam('vtkdiff',   [], "A list of files to vtkdiff.")
        params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
        params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
        params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
        params.addParam('ignored_attributes',  [], "Ignore e.g. type and/or version in sample XML block <VTKFile type=\"Foo\" version=\"0.1\">")

        return params
Example 34
    def __init__(self, name, params):
        RunApp.__init__(self, name, params)

        self.moose_dir = os.environ.get('MOOSE_DIR',
                                        os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                                     '..')))

        if os.environ.has_key("LIBMESH_DIR"):
            self.libmesh_dir = os.environ['LIBMESH_DIR']
        else:
            self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')

        if util.getPetscVersion(self.libmesh_dir) < '3.9':
            self.specs['cli_args'].append('-snes_type test')
        else:
            self.specs['cli_args'].extend(['-snes_test_jacobian', '-snes_force_iteration'])
            if not self.specs['run_sim']:
                self.specs['cli_args'].extend(['-snes_type', 'ksponly',
                                  '-ksp_type', 'preonly', '-pc_type', 'none', '-snes_convergence_test', 'skip'])
Example 35
 def validParams():
     params = RunApp.validParams()
     params.addParam('check_files', [], "A list of files that MUST exist.")
     params.addParam('check_not_exists', [],
                     "A list of files that must NOT exist.")
     params.addParam(
         'file_expect_out',
         "A regular expression that must occur in all of the check files in order for the test to be considered passing."
     )
     return params
Example 36
    def validParams():
        params = RunApp.validParams()
        params.addRequiredParam('nPosts', "The Number of Expected Posts")
        params.addRequiredParam('port', "The port to listen to")
        # Recover testing requires the Tester object to be copied, but
        # this type of object *can't* be copied because it contains a
        # thread.lock object.
        params['recover'] = False

        return params
Example 37
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('nPosts', "The Number of Expected Posts")
    params.addRequiredParam('port', "The port to listen to")
    # Recover testing requires the Tester object to be copied, but
    # this type of object *can't* be copied because it contains a
    # thread.lock object.
    params['recover'] = False

    return params
Example 38
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('vtkdiff',   [], "A list of files to vtkdiff.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
    params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")
    params.addParam('ignored_attributes',  [], "Ignore e.g. type and/or version in sample XML block <VTKFile type=\"Foo\" version=\"0.1\">")

    return params
Example 39
 def validParams():
     params = RunApp.validParams()
     params.addRequiredParam('jsondiff', [],
                             "A list of XML files to compare.")
     params.addParam(
         'gold_dir', 'gold',
         "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)"
     )
     params.addParam('skip_keys', [],
                     "A list of keys to skip in the JSON comparison")
     return params
Example 40
 def validParams():
     params = RunApp.validParams()
     params.addParam('ratio_tol', 1e-8, "Relative tolerance to compare the ratio against.")
     params.addParam('difference_tol', 1e-8, "Relative tolerance to compare the difference against.")
     params.addParam('state', 'user', "The state for which we want to compare against the "
                                      "finite-differenced Jacobian ('user', 'const_positive', or "
                                      "'const_negative'.")
     params.addParam('run_sim', False, "Whether to actually run the simulation, testing the Jacobian "
                                       "at every non-linear iteration of every time step. This is only "
                                       "relevant for petsc versions >= 3.9.")
     return params
Example 41
  def validParams():
    params = RunApp.validParams()

    params.addParam('expect_err', "A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
    params.addParam('expect_assert', "DEBUG MODE ONLY: A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
    params.addParam('should_crash', True, "Indicates that the test is expected to crash or otherwise terminate early")

    # Error messages printed in parallel often intertwine when multiple processors emit the same error, so we set max_parallel = 1 by default; it can be overridden
    params['max_parallel'] = 1

    return params
Example 42
    def validParams():
        params = RunApp.validParams()

        params.addParam('expect_err', "A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
        params.addParam('expect_assert', "DEBUG MODE ONLY: A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
        params.addParam('should_crash', True, "Indicates that the test is expected to crash or otherwise terminate early")

        # RunException tests executed in parallel need to have their output redirected to a file, and examined individually
        params['redirect_output'] = True

        return params
Example 43
 def validParams():
     params = RunApp.validParams()
     params.addParam('ratio_tol', 1e-8, "Relative tolerance to compare the ratio against.")
     params.addParam('difference_tol', 1e-8, "Relative tolerance to compare the difference against.")
     params.addParam('state', 'user', "The state for which we want to compare against the "
                                      "finite-differenced Jacobian ('user', 'const_positive', or "
                                      "'const_negative'.")
     params.addParam('run_sim', False, "Whether to actually run the simulation, testing the Jacobian "
                                       "at every non-linear iteration of every time step. This is only "
                                       "relevant for petsc versions >= 3.9.")
     return params
Example 44
    def validParams():
        params = RunApp.validParams()

        params.addParam('expect_err', "A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
        params.addParam('expect_assert', "DEBUG MODE ONLY: A regular expression that must occur in the output. (Test may terminate unexpectedly and be considered passing)")
        params.addParam('should_crash', True, "Indicates that the test is expected to crash or otherwise terminate early")

        # RunException tests executed in parallel need to have their output redirected to a file, and examined individually
        params['redirect_output'] = True

        return params
Example 45
    def validParams():
        params = RunApp.validParams()

        # Input is optional in the base class. Make it required here
        params.addRequiredParam('input', "The python input file to use for this test.")
        params.addParam('buffer', False, "Equivalent to passing -b or --buffer to the unittest.")
        params.addParam('separate', False, "Run each test in the file in a separate subprocess")
        # We don't want to check for any errors on the screen with unit tests
        params['errors'] = []
        params['valgrind'] = 'NONE'
        params['recover'] = False
        return params
Example 46
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('exodiff',   [], "A list of files to exodiff.")
    params.addParam('exodiff_opts',      [], "Additional arguments to be passed to invocations of exodiff.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
    params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
    params.addParam('custom_cmp',            "Custom comparison file")
    params.addParam('use_old_floor',  False, "Use Exodiff old floor option")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")

    return params
Example 47
    def processResults(self, moose_dir, retcode, options, output):
        (reason, output) = RunApp.processResults(self, moose_dir, retcode,
                                                 options, output)

        # Skip
        specs = self.specs
        if reason != '' or specs['skip_checks']:
            return (reason, output)

        # Don't Run VTKDiff on Scaled Tests
        if options.scaling and specs['scale_refine']:
            return (reason, output)

        # Loop over every file
        for file in specs['vtkdiff']:

            # Error if gold file does not exist
            if not os.path.exists(
                    os.path.join(specs['test_dir'], specs['gold_dir'], file)):
                output += "File Not Found: " + os.path.join(
                    specs['test_dir'], specs['gold_dir'], file)
                reason = 'MISSING GOLD FILE'
                break

            # Perform diff
            else:
                for file in self.specs['vtkdiff']:
                    gold = os.path.join(specs['test_dir'], specs['gold_dir'],
                                        file)
                    test = os.path.join(specs['test_dir'], file)

                    # We always ignore the header_type attribute, since it was
                    # introduced in VTK 7 and doesn't seem to be important as
                    # far as Paraview is concerned.
                    specs['ignored_attributes'].append('header_type')

                    differ = XMLDiffer(
                        gold,
                        test,
                        abs_zero=specs['abs_zero'],
                        rel_tol=specs['rel_err'],
                        ignored_attributes=specs['ignored_attributes'])

                    # Print the results of the VTKDiff whether it passed or failed.
                    output += differ.message() + '\n'

                    if differ.fail():
                        reason = 'VTKDIFF'
                        break

        # Return to the test harness
        return (reason, output)
Example 48
    def validParams():
        params = RunApp.validParams()

        # Input is optional in the base class. Make it required here
        params.addRequiredParam('input', "The python input file to use for this test.")
        params.addParam('test_case', "The specific test case to run (Default: All test cases in the module)")
        params.addParam('buffer', False, "Equivalent to passing -b or --buffer to the unittest.")
        params.addParam('separate', False, "Run each test in the file in a separate subprocess")
        # We don't want to check for any errors on the screen with unit tests
        params['errors'] = []
        params['valgrind'] = 'NONE'
        params['recover'] = False
        return params
Example 49
  def validParams():
    params = RunApp.validParams()
    params.addRequiredParam('exodiff',   [], "A list of files to exodiff.")
    params.addParam('exodiff_opts',      [], "Additional arguments to be passed to invocations of exodiff.")
    params.addParam('gold_dir',      'gold', "The directory where the \"golden standard\" files reside relative to the TEST_DIR: (default: ./gold/)")
    params.addParam('abs_zero',       1e-10, "Absolute zero cutoff used in exodiff comparisons.")
    params.addParam('rel_err',       5.5e-6, "Relative error value used in exodiff comparisons.")
    params.addParam('custom_cmp',            "Custom comparison file")
    params.addParam('use_old_floor',  False, "Use Exodiff old floor option")
    params.addParam('delete_output_before_running',  True, "Delete pre-existing output files before running test. Only set to False if you know what you're doing!")
    params.addParam('delete_output_folders', True, "Delete output folders before running")
    params.addParam('map',  True, "Use geometrical mapping to match up elements.  This is usually a good idea because it makes files comparable between runs with Serial and Parallel Mesh.")

    return params
Example 50
    def __init__(self, name, params):
        RunApp.__init__(self, name, params)

        self.moose_dir = os.environ.get('MOOSE_DIR',
                                        os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                                     '..')))

        if os.environ.has_key("LIBMESH_DIR"):
            self.libmesh_dir = os.environ['LIBMESH_DIR']
        else:
            self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')

        if self.specs['turn_off_exodus_output']:
            self.specs['cli_args'][:0] = ['Outputs/exodus=false']

        if map(int, util.getPetscVersion(self.libmesh_dir).split(".")) < [3, 9]:
            self.old_petsc = True
            self.specs['cli_args'].append('-snes_type test')
        else:
            self.old_petsc = False
            self.specs['cli_args'].extend(['-snes_test_jacobian', '-snes_force_iteration'])
            if not self.specs['run_sim']:
                self.specs['cli_args'].extend(['-snes_type', 'ksponly',
                                  '-ksp_type', 'preonly', '-pc_type', 'none', '-snes_convergence_test', 'skip'])
Example 51
  def processResults(self, moose_dir, retcode, options, output):
    reason = ''
    specs = self.specs

    # Expected errors and assertions might do a lot of things including crash so we
    # will handle them separately
    if specs.isValid('expect_err'):
      if not self.checkOutputForPattern(output, specs['expect_err']):
        reason = 'NO EXPECTED ERR'
    elif specs.isValid('expect_assert'):
      if options.method == 'dbg':  # Only check asserts in debug mode
        if not self.checkOutputForPattern(output, specs['expect_assert']):
          reason = 'NO EXPECTED ASSERT'

    if reason == '':
      (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    return (reason, output)
Example 52
  def processResults(self, moose_dir, retcode, options, output):
    reason = ''
    specs = self.specs

    # Expected errors and assertions might do a lot of things including crash so we
    # will handle them separately
    if specs.isValid('expect_err'):
      if not self.checkOutputForPattern(output, specs['expect_err']):
        reason = 'NO EXPECTED ERR'
    elif specs.isValid('expect_assert'):
      if options.method == 'dbg':  # Only check asserts in debug mode
        if not self.checkOutputForPattern(output, specs['expect_assert']):
          reason = 'NO EXPECTED ASSERT'

    if reason == '':
      (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    return (reason, output)
Example 53
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    # Skip
    specs = self.specs
    if reason != '' or specs['skip_checks']:
      return (reason, output)

    # Don't Run VTKDiff on Scaled Tests
    if options.scaling and specs['scale_refine']:
      return (reason, output)

    # Loop over every file
    for file in specs['vtkdiff']:

      # Error if gold file does not exist
      if not os.path.exists(os.path.join(specs['test_dir'], specs['gold_dir'], file)):
        output += "File Not Found: " + os.path.join(specs['test_dir'], specs['gold_dir'], file)
        reason = 'MISSING GOLD FILE'
        break

      # Perform diff
      else:
        for file in self.specs['vtkdiff']:
          gold = os.path.join(specs['test_dir'], specs['gold_dir'], file)
          test = os.path.join(specs['test_dir'], file)

          # We always ignore the header_type attribute, since it was
          # introduced in VTK 7 and doesn't seem to be important as
          # far as Paraview is concerned.
          specs['ignored_attributes'].append('header_type')

          differ = XMLDiffer(gold, test, abs_zero=specs['abs_zero'], rel_tol=specs['rel_err'], ignored_attributes=specs['ignored_attributes'])

          # Print the results of the VTKDiff whether it passed or failed.
          output += differ.message() + '\n'

          if differ.fail():
            reason = 'VTKDIFF'
            break

    # Return to the test harness
    return (reason, output)
Example 54
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    specs = self.specs
    if reason != '' or specs['skip_checks']:
      return (reason, output)

    # Don't Run CSVDiff on Scaled Tests
    if options.scaling and specs['scale_refine']:
      return (reason, output)

    if len(specs['csvdiff']) > 0:
      differ = CSVDiffer( specs['test_dir'], specs['csvdiff'], specs['abs_zero'], specs['rel_err'] )
      msg = differ.diff()
      output += 'Running CSVDiffer.py\n' + msg
      if msg != '':
        reason = 'CSVDIFF'

    return (reason, output)
Example 55
    def processResults(self, moose_dir, retcode, options, output):
        (reason, output) = RunApp.processResults(self, moose_dir, retcode,
                                                 options, output)

        # Skip
        specs = self.specs
        if reason != '' or specs['skip_checks']:
            return (reason, output)

        # Don't Run VTKDiff on Scaled Tests
        if options.scaling and specs['scale_refine']:
            return (reason, output)

        # Loop over every file
        for file in specs['vtkdiff']:

            # Error if gold file does not exist
            if not os.path.exists(
                    os.path.join(specs['test_dir'], specs['gold_dir'], file)):
                output += "File Not Found: " + os.path.join(
                    specs['test_dir'], specs['gold_dir'], file)
                reason = 'MISSING GOLD FILE'
                break

            # Perform diff
            else:
                output = 'Running XMLDiffer.py'
                for file in self.specs['vtkdiff']:
                    gold = os.path.join(specs['test_dir'], specs['gold_dir'],
                                        file)
                    test = os.path.join(specs['test_dir'], file)
                    differ = XMLDiffer(gold,
                                       test,
                                       abs_zero=specs['abs_zero'],
                                       rel_tol=specs['rel_err'])

                    if differ.fail():
                        reason = 'VTKDIFF'
                        output += differ.message()
                        break

        # Return to the test harness
        return (reason, output)
Example 56
  def processResults(self, moose_dir, retcode, options, output):
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)

    # Skip
    specs = self.specs
    if reason != '' or specs['skip_checks']:
      return (reason, output)

    # Don't Run VTKDiff on Scaled Tests
    if options.scaling and specs['scale_refine']:
      return (reason, output)

    # Loop over every file
    for file in specs['vtkdiff']:

      # Error if gold file does not exist
      if not os.path.exists(os.path.join(specs['test_dir'], specs['gold_dir'], file)):
        output += "File Not Found: " + os.path.join(specs['test_dir'], specs['gold_dir'], file)
        reason = 'MISSING GOLD FILE'
        break

      # Perform diff
      else:
        output = 'Running XMLDiffer.py'
        for file in self.specs['vtkdiff']:
          gold = os.path.join(specs['test_dir'], specs['gold_dir'], file)
          test = os.path.join(specs['test_dir'], file)
          differ = XMLDiffer(gold, test, abs_zero=specs['abs_zero'], rel_tol=specs['rel_err'])

          if differ.fail():
            reason = 'VTKDIFF'
            output += differ.message()
            break

    # Return to the test harness
    return (reason, output)
Example 57
 def __init__(self, name, params):
   RunApp.__init__(self, name, params)